repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
dungvtdev/upsbayescpm | bayespy/inference/vmp/nodes/binomial.py | 2 | 6038 |
################################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
A module for the binomial distribution node
"""
import numpy as np
import scipy.special as special
from .expfamily import (ExponentialFamily,
ExponentialFamilyDistribution,
useconstructor)
from .beta import BetaMoments
from .poisson import PoissonMoments
from .node import (Moments,
ensureparents)
from bayespy.utils import misc, random
class BinomialMoments(PoissonMoments):
"""
Class for the moments of binomial variables
"""
def __init__(self, N):
self.N = N
super().__init__()
def compute_fixed_moments(self, x):
"""
Compute the moments for a fixed value
"""
# Make sure the values are integers in valid range
x = np.asanyarray(x)
if np.any(x > self.N):
raise ValueError("Invalid count")
return super().compute_fixed_moments(x)
def compute_dims_from_values(self, x):
"""
Return the shape of the moments for a fixed value.
The realizations are scalars, thus the shape of the moment is ().
"""
raise DeprecationWarning()
return super().compute_dims_from_values()
class BinomialDistribution(ExponentialFamilyDistribution):
"""
Class for the VMP formulas of binomial variables.
"""
def __init__(self, N):
N = np.asanyarray(N)
if not misc.isinteger(N):
raise ValueError("Number of trials must be integer")
if np.any(N < 0):
raise ValueError("Number of trials must be non-negative")
self.N = np.asanyarray(N)
super().__init__()
def compute_message_to_parent(self, parent, index, u_self, u_p):
"""
Compute the message to a parent node.
"""
if index == 0:
x = u_self[0][...,None]
n = self.N[...,None]
m0 = x*[1, -1] + n*[0, 1]
m = [m0]
return m
else:
raise ValueError("Incorrect parent index")
def compute_phi_from_parents(self, u_p, mask=True):
"""
Compute the natural parameter vector given parent moments.
"""
logp0 = u_p[0][...,0]
logp1 = u_p[0][...,1]
phi0 = logp0 - logp1
return [phi0]
def compute_moments_and_cgf(self, phi, mask=True):
"""
Compute the moments and :math:`g(\phi)`.
"""
u0 = self.N / (1 + np.exp(-phi[0]))
g = -self.N * np.log1p(np.exp(phi[0]))
return ( [u0], g )
def compute_cgf_from_parents(self, u_p):
"""
Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
"""
logp0 = u_p[0][...,0]
logp1 = u_p[0][...,1]
return self.N * logp1
def compute_fixed_moments_and_f(self, x, mask=True):
"""
Compute the moments and :math:`f(x)` for a fixed value.
"""
# Make sure the values are integers in valid range
x = np.asanyarray(x)
if not misc.isinteger(x):
raise ValueError("Counts must be integer")
if np.any(x < 0) or np.any(x > self.N):
raise ValueError("Invalid count")
# Now, the moments are just the counts
u = [x]
f = (special.gammaln(self.N+1) -
special.gammaln(x+1) -
special.gammaln(self.N-x+1))
return (u, f)
def random(self, *phi, plates=None):
"""
Draw a random sample from the distribution.
"""
p = random.logodds_to_probability(phi[0])
return np.random.binomial(self.N, p, size=plates)
class Binomial(ExponentialFamily):
r"""
Node for binomial random variables.
The node models the number of successes :math:`x \in \{0, \ldots, n\}` in
:math:`n` trials with probability :math:`p` for success:
.. math::
x \sim \mathrm{Binomial}(n, p).
Parameters
----------
n : scalar or array
Number of trials
p : beta-like node or scalar or array
Probability of a success in a trial
Examples
--------
>>> import warnings
>>> warnings.filterwarnings('ignore', category=RuntimeWarning)
>>> from bayespy.nodes import Binomial, Beta
>>> p = Beta([1e-3, 1e-3])
>>> x = Binomial(10, p)
>>> x.observe(7)
>>> p.update()
>>> import bayespy.plot as bpplt
>>> import numpy as np
>>> bpplt.pdf(p, np.linspace(0, 1, num=100))
[<matplotlib.lines.Line2D object at 0x...>]
See also
--------
Bernoulli, Multinomial, Beta
"""
def __init__(self, n, p, **kwargs):
"""
Create binomial node
"""
super().__init__(n, p, **kwargs)
@classmethod
def _constructor(cls, n, p, **kwargs):
"""
Constructs distribution and moments objects.
"""
p = cls._ensure_moments(p, BetaMoments)
parents = [p]
moments = BinomialMoments(n)
parent_moments = (p._moments,)
distribution = BinomialDistribution(n)
return ( parents,
kwargs,
( (), ),
cls._total_plates(kwargs.get('plates'),
distribution.plates_from_parent(0, p.plates),
np.shape(n)),
distribution,
moments,
parent_moments)
def __str__(self):
"""
Print the distribution using standard parameterization.
"""
p = 1 / (1 + np.exp(-self.phi[0]))
n = self._distribution.N
return ("%s ~ Binomial(n, p)\n"
" n = \n"
"%s\n"
" p = \n"
"%s\n"
% (self.name, n, p))
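# ---------------------------------------------------------------------------
# Illustrative sketch (a hypothetical `_demo_*` helper, not part of the
# original bayespy module): for a fixed probability p it mirrors the
# phi <-> moment mapping used by BinomialDistribution.compute_moments_and_cgf
# above, i.e. phi = log p - log(1 - p), E[x] = N * sigmoid(phi) and
# g(phi) = N * log(1 - p).
def _demo_binomial_natural_parameters(N=10, p=0.7):
    phi = np.log(p) - np.log1p(-p)       # natural parameter (log-odds)
    mean = N / (1 + np.exp(-phi))        # first moment, equals N * p
    cgf = -N * np.log1p(np.exp(phi))     # g(phi), equals N * log(1 - p)
    assert np.isclose(mean, N * p)
    assert np.isclose(cgf, N * np.log1p(-p))
    return mean, cgf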
| mit |
clemkoa/scikit-learn | sklearn/neighbors/unsupervised.py | 8 | 4744 |
"""Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
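# ---------------------------------------------------------------------------
# Illustrative usage sketch (a hypothetical `_demo_*` helper, not part of the
# original scikit-learn module): it exercises the kneighbors and
# kneighbors_graph interfaces documented above; the sample data and parameter
# values are arbitrary.
def _demo_nearest_neighbors():
    import numpy as np
    X = np.array([[0., 0., 2.], [1., 0., 0.], [0., 0., 1.]])
    nn = NearestNeighbors(n_neighbors=2, radius=0.4).fit(X)
    dist, ind = nn.kneighbors([[0., 0., 1.3]])            # two closest samples
    graph = nn.kneighbors_graph(X, mode='connectivity')   # sparse CSR matrix
    return dist, ind, graph.toarray()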
| bsd-3-clause |
sem-geologist/hyperspy | hyperspy/_signals/signal1d.py | 2 | 58416 |
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
import math
import matplotlib.pyplot as plt
import numpy as np
import dask.array as da
import scipy.interpolate
import scipy as sp
from scipy.signal import savgol_filter
from scipy.ndimage.filters import gaussian_filter1d
try:
from statsmodels.nonparametric.smoothers_lowess import lowess
statsmodels_installed = True
except BaseException:
statsmodels_installed = False
from hyperspy.signal import BaseSignal
from hyperspy._signals.common_signal1d import CommonSignal1D
from hyperspy.signal_tools import SpikesRemoval
from hyperspy.models.model1d import Model1D
from hyperspy.misc.utils import signal_range_from_roi
from hyperspy.defaults_parser import preferences
from hyperspy.signal_tools import (
Signal1DCalibration,
SmoothingSavitzkyGolay,
SmoothingLowess,
SmoothingTV,
ButterworthFilter)
from hyperspy.ui_registry import DISPLAY_DT, TOOLKIT_DT
from hyperspy.misc.tv_denoise import _tv_denoise_1d
from hyperspy.signal_tools import BackgroundRemoval
from hyperspy.decorators import interactive_range_selector
from hyperspy.signal_tools import IntegrateArea
from hyperspy import components1d
from hyperspy._signals.lazy import LazySignal
from hyperspy.docstrings.signal1d import CROP_PARAMETER_DOC
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG, PARALLEL_ARG
from hyperspy.misc.test_utils import ignore_warning
_logger = logging.getLogger(__name__)
def find_peaks_ohaver(y, x=None, slope_thresh=0., amp_thresh=None,
medfilt_radius=5, maxpeakn=30000, peakgroup=10,
subchannel=True,):
"""Find peaks along a 1D line.
Function to locate the positive peaks in a noisy x-y data set.
Detects peaks by looking for downward zero-crossings in the first
derivative that exceed 'slope_thresh'.
Returns an array containing position, height, and width of each peak.
Sorted by position.
'slope_thresh' and 'amp_thresh' control sensitivity: higher values
will neglect wider peaks (slope) and smaller features (amp),
respectively.
Parameters
----------
y : array
1D input array, e.g. a spectrum
x : array (optional)
1D array describing the calibration of y (must have same shape as y)
slope_thresh : float (optional)
1st derivative threshold to count the peak;
higher values will neglect broader features;
default is set to 0.
amp_thresh : float (optional)
intensity threshold below which peaks are ignored;
higher values will neglect smaller features;
default is set to 10% of max(y).
medfilt_radius : int (optional)
median filter window to apply to smooth the data
(see scipy.signal.medfilt);
if 0, no filter will be applied;
default is set to 5.
peakgroup : int (optional)
number of points around the "top part" of the peak that
are taken to estimate the peak height; for spikes or
very narrow peaks, keep PeakGroup=1 or 2; for broad or
noisy peaks, make PeakGroup larger to reduce the effect
of noise;
default is set to 10.
maxpeakn : int (optional)
number of maximum detectable peaks;
default is set to 30000.
subchannel : bool (optional)
default is set to True.
Returns
-------
P : structured array of shape (npeaks)
contains fields: 'position', 'width', and 'height' for each peak.
Examples
--------
>>> x = np.arange(0,50,0.01)
>>> y = np.cos(x)
>>> peaks = find_peaks_ohaver(y, x, 0, 0)
Notes
-----
Original code from T. C. O'Haver, 1995.
Version 2 Last revised Oct 27, 2006 Converted to Python by
Michael Sarahan, Feb 2011.
Revised to handle edges better. MCS, Mar 2011
"""
if x is None:
x = np.arange(len(y), dtype=np.int64)
if not amp_thresh:
amp_thresh = 0.1 * y.max()
peakgroup = np.round(peakgroup)
if medfilt_radius:
d = np.gradient(scipy.signal.medfilt(y, medfilt_radius))
else:
d = np.gradient(y)
n = np.round(peakgroup / 2 + 1)
peak_dt = np.dtype([('position', np.float),
('height', np.float),
('width', np.float)])
P = np.array([], dtype=peak_dt)
peak = 0
for j in range(len(y) - 4):
if np.sign(d[j]) > np.sign(d[j + 1]): # Detects zero-crossing
if np.sign(d[j + 1]) == 0:
continue
# if slope of derivative is larger than slope_thresh
if d[j] - d[j + 1] > slope_thresh:
# if height of peak is larger than amp_thresh
if y[j] > amp_thresh:
# the next section is very slow, and actually messes
# things up for images (discrete pixels),
# so by default, don't do subchannel precision in the
# 1D peakfind step.
if subchannel:
xx = np.zeros(peakgroup)
yy = np.zeros(peakgroup)
s = 0
for k in range(peakgroup):
groupindex = int(j + k - n + 1)
if groupindex < 1:
xx = xx[1:]
yy = yy[1:]
s += 1
continue
elif groupindex > y.shape[0] - 1:
xx = xx[:groupindex - 1]
yy = yy[:groupindex - 1]
break
xx[k - s] = x[groupindex]
yy[k - s] = y[groupindex]
avg = np.average(xx)
stdev = np.std(xx)
xxf = (xx - avg) / stdev
# Fit parabola to log10 of sub-group with
# centering and scaling
yynz = yy != 0
coef = np.polyfit(
xxf[yynz], np.log10(np.abs(yy[yynz])), 2)
c1 = coef[2]
c2 = coef[1]
c3 = coef[0]
with np.errstate(invalid='ignore'):
width = np.linalg.norm(stdev * 2.35703 /
(np.sqrt(2) * np.sqrt(-1 *
c3)))
# if the peak is too narrow for least-squares
# technique to work well, just use the max value
# of y in the sub-group of points near peak.
if peakgroup < 7:
height = np.max(yy)
position = xx[np.argmin(np.abs(yy - height))]
else:
position = - ((stdev * c2 / (2 * c3)) - avg)
height = np.exp(c1 - c3 * (c2 / (2 * c3)) ** 2)
# Fill results array P. One row for each peak
# detected, containing the
# peak position (x-value) and peak height (y-value).
else:
position = x[j]
height = y[j]
# no way to know peak width without
# the above measurements.
width = 0
if (not np.isnan(position) and 0 < position < x[-1]):
P = np.hstack((P,
np.array([(position, height, width)],
dtype=peak_dt)))
peak += 1
# return only the part of the array that contains peaks
# (not the whole maxpeakn x 3 array)
if len(P) > maxpeakn:
minh = np.sort(P['height'])[-maxpeakn]
P = P[P['height'] >= minh]
# Sorts the values as a function of position
P.sort(0)
return P
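# ---------------------------------------------------------------------------
# Illustrative sketch (a hypothetical `_demo_*` helper, not part of the
# original HyperSpy module): it builds a synthetic two-peak spectrum and runs
# find_peaks_ohaver on it, assuming the NumPy/SciPy versions this module was
# written for (the `np.float` dtype used above only exists on older NumPy).
def _demo_find_peaks_ohaver():
    x = np.linspace(0., 10., 1000)
    y = (np.exp(-0.5 * ((x - 3.) / 0.2) ** 2) +
         0.5 * np.exp(-0.5 * ((x - 7.) / 0.3) ** 2))
    peaks = find_peaks_ohaver(y, x=x, slope_thresh=0., amp_thresh=0.1)
    # `peaks` is a structured array with 'position', 'height' and 'width'.
    return peaks['position'], peaks['height']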
def interpolate1D(number_of_interpolation_points, data):
ip = number_of_interpolation_points
ch = len(data)
old_ax = np.linspace(0, 100, ch)
new_ax = np.linspace(0, 100, ch * ip - (ip - 1))
interpolator = scipy.interpolate.interp1d(old_ax, data)
return interpolator(new_ax)
def _estimate_shift1D(data, **kwargs):
mask = kwargs.get('mask', None)
ref = kwargs.get('ref', None)
interpolate = kwargs.get('interpolate', True)
ip = kwargs.get('ip', 5)
data_slice = kwargs.get('data_slice', slice(None))
if bool(mask):
# asarray is required for consistency as argmax
# returns a numpy scalar array
return np.asarray(np.nan)
data = data[data_slice]
if interpolate is True:
data = interpolate1D(ip, data)
return np.argmax(np.correlate(ref, data, 'full')) - len(ref) + 1
def _shift1D(data, **kwargs):
shift = kwargs.get('shift', 0.)
original_axis = kwargs.get('original_axis', None)
fill_value = kwargs.get('fill_value', np.nan)
kind = kwargs.get('kind', 'linear')
offset = kwargs.get('offset', 0.)
scale = kwargs.get('scale', 1.)
size = kwargs.get('size', 2)
if np.isnan(shift) or shift == 0:
return data
axis = np.linspace(offset, offset + scale * (size - 1), size)
si = sp.interpolate.interp1d(original_axis,
data,
bounds_error=False,
fill_value=fill_value,
kind=kind)
offset = float(offset - shift)
axis = np.linspace(offset, offset + scale * (size - 1), size)
return si(axis)
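# ---------------------------------------------------------------------------
# Illustrative sketch (a hypothetical `_demo_*` helper, not part of the
# original HyperSpy module): on a toy spectrum it performs the same two steps
# as the helpers above -- estimate an integer channel shift by
# cross-correlation, then undo it by resampling onto a moved axis.
def _demo_estimate_and_shift1D():
    axis = np.linspace(0., 10., 501)                      # scale = 0.02
    ref = np.exp(-0.5 * ((axis - 5.0) / 0.3) ** 2)
    data = np.exp(-0.5 * ((axis - 5.4) / 0.3) ** 2)       # same peak, shifted
    # Integer shift in channels, as in _estimate_shift1D (no interpolation);
    # it is negative here because `data` lies to the right of `ref`.
    shift_channels = np.argmax(np.correlate(ref, data, 'full')) - len(ref) + 1
    shift_value = shift_channels * (axis[1] - axis[0])
    # Undo the shift the way _shift1D does, via interp1d on a shifted axis.
    realigned = _shift1D(data, shift=shift_value, original_axis=axis,
                         offset=axis[0], scale=axis[1] - axis[0],
                         size=axis.size, fill_value=np.nan)
    return shift_channels, shift_value, realigned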
class Signal1D(BaseSignal, CommonSignal1D):
"""
"""
_signal_dimension = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.axes_manager.signal_dimension != 1:
self.axes_manager.set_signal_dimension(1)
def _spikes_diagnosis(self, signal_mask=None,
navigation_mask=None):
"""Plots a histogram to help in choosing the threshold for
spikes removal.
Parameters
----------
signal_mask : boolean array
Restricts the operation to the signal locations not marked
as True (masked)
navigation_mask : boolean array
Restricts the operation to the navigation locations not
marked as True (masked).
See also
--------
spikes_removal_tool
"""
self._check_signal_dimension_equals_one()
dc = self.data
if signal_mask is not None:
dc = dc[..., ~signal_mask]
if navigation_mask is not None:
dc = dc[~navigation_mask, :]
der = np.abs(np.diff(dc, 1, -1))
n = ((~navigation_mask).sum() if navigation_mask else
self.axes_manager.navigation_size)
# arbitrary cutoff for number of spectra necessary before histogram
# data is compressed by finding maxima of each spectrum
tmp = BaseSignal(der) if n < 2000 else BaseSignal(
np.ravel(der.max(-1)))
# get histogram signal using smart binning and plot
tmph = tmp.get_histogram()
tmph.plot()
# Customize plot appearance
plt.gca().set_title('')
plt.gca().fill_between(tmph.axes_manager[0].axis,
tmph.data,
facecolor='#fddbc7',
interpolate=True,
color='none')
ax = tmph._plot.signal_plot.ax
axl = tmph._plot.signal_plot.ax_lines[0]
axl.set_line_properties(color='#b2182b')
plt.xlabel('Derivative magnitude')
plt.ylabel('Log(Counts)')
ax.set_yscale('log')
ax.set_ylim(10 ** -1, plt.ylim()[1])
ax.set_xlim(plt.xlim()[0], 1.1 * plt.xlim()[1])
plt.draw()
def spikes_removal_tool(self, signal_mask=None,
navigation_mask=None, display=True, toolkit=None):
"""Graphical interface to remove spikes from EELS spectra.
Parameters
----------
signal_mask : boolean array
Restricts the operation to the signal locations not marked
as True (masked)
navigation_mask : boolean array
Restricts the operation to the navigation locations not
marked as True (masked)
%s
%s
See also
--------
`_spikes_diagnosis`
"""
self._check_signal_dimension_equals_one()
sr = SpikesRemoval(self,
navigation_mask=navigation_mask,
signal_mask=signal_mask)
return sr.gui(display=display, toolkit=toolkit)
spikes_removal_tool.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
def create_model(self, dictionary=None):
"""Create a model for the current data.
Returns
-------
model : `Model1D` instance.
"""
model = Model1D(self, dictionary=dictionary)
return model
def shift1D(self,
shift_array,
interpolation_method='linear',
crop=True,
expand=False,
fill_value=np.nan,
parallel=None,
show_progressbar=None):
"""Shift the data in place over the signal axis by the amount specified
by an array.
Parameters
----------
shift_array : numpy array
An array containing the shifting amount. It must have
`axes_manager._navigation_shape_in_array` shape.
interpolation_method : str or int
Specifies the kind of interpolation as a string ('linear',
'nearest', 'zero', 'slinear', 'quadratic, 'cubic') or as an
integer specifying the order of the spline interpolator to
use.
%s
expand : bool
If True, the data will be expanded to fit all data after alignment.
Overrides `crop`.
fill_value : float
If crop is False fill the data outside of the original
interval with the given value where needed.
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if not np.any(shift_array):
# Nothing to do, the shift array is filled with zeros
return
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
# Figure out min/max shifts, and translate to shifts in index as well
minimum, maximum = np.nanmin(shift_array), np.nanmax(shift_array)
if minimum < 0:
ihigh = 1 + axis.value2index(
axis.high_value + minimum,
rounding=math.floor)
else:
ihigh = axis.high_index + 1
if maximum > 0:
ilow = axis.value2index(axis.offset + maximum,
rounding=math.ceil)
else:
ilow = axis.low_index
if expand:
if self._lazy:
ind = axis.index_in_array
pre_shape = list(self.data.shape)
post_shape = list(self.data.shape)
pre_chunks = list(self.data.chunks)
post_chunks = list(self.data.chunks)
pre_shape[ind] = axis.high_index - ihigh + 1
post_shape[ind] = ilow - axis.low_index
for chunks, shape in zip((pre_chunks, post_chunks),
(pre_shape, post_shape)):
maxsize = min(np.max(chunks[ind]), shape[ind])
num = np.ceil(shape[ind] / maxsize)
chunks[ind] = tuple(len(ar) for ar in
np.array_split(np.arange(shape[ind]),
num))
pre_array = da.full(tuple(pre_shape),
fill_value,
chunks=tuple(pre_chunks))
post_array = da.full(tuple(post_shape),
fill_value,
chunks=tuple(post_chunks))
self.data = da.concatenate((pre_array, self.data, post_array),
axis=ind)
else:
padding = []
for i in range(self.data.ndim):
if i == axis.index_in_array:
padding.append((axis.high_index - ihigh + 1,
ilow - axis.low_index))
else:
padding.append((0, 0))
self.data = np.pad(self.data, padding, mode='constant',
constant_values=(fill_value,))
axis.offset += minimum
axis.size += axis.high_index - ihigh + 1 + ilow - axis.low_index
self._map_iterate(_shift1D, (('shift', shift_array.ravel()),),
original_axis=axis.axis,
fill_value=fill_value,
kind=interpolation_method,
offset=axis.offset,
scale=axis.scale,
size=axis.size,
show_progressbar=show_progressbar,
parallel=parallel,
ragged=False)
if crop and not expand:
_logger.debug("Cropping %s from index %i to %i"
% (self, ilow, ihigh))
self.crop(axis.index_in_axes_manager,
ilow,
ihigh)
self.events.data_changed.trigger(obj=self)
shift1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG, PARALLEL_ARG)
def interpolate_in_between(self, start, end, delta=3, parallel=None,
show_progressbar=None, **kwargs):
"""Replace the data in a given range by interpolation.
The operation is performed in place.
Parameters
----------
start, end : int or float
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
delta : int or float
The windows around the (start, end) to use for interpolation
%s
%s
All extra keyword arguments are passed to
`scipy.interpolate.interp1d`. See the function documentation
for details.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
i1 = axis._get_index(start)
i2 = axis._get_index(end)
if isinstance(delta, float):
delta = int(delta / axis.scale)
i0 = int(np.clip(i1 - delta, 0, np.inf))
i3 = int(np.clip(i2 + delta, 0, axis.size))
def interpolating_function(dat):
dat_int = sp.interpolate.interp1d(
list(range(i0, i1)) + list(range(i2, i3)),
dat[i0:i1].tolist() + dat[i2:i3].tolist(),
**kwargs)
dat[i1:i2] = dat_int(list(range(i1, i2)))
return dat
self._map_iterate(interpolating_function, ragged=False,
parallel=parallel, show_progressbar=show_progressbar)
self.events.data_changed.trigger(obj=self)
interpolate_in_between.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG)
def _check_navigation_mask(self, mask):
if mask is not None:
if not isinstance(mask, BaseSignal):
raise ValueError("mask must be a BaseSignal instance.")
elif mask.axes_manager.signal_dimension not in (0, 1):
raise ValueError("mask must be a BaseSignal "
"with signal_dimension equal to 1")
elif (mask.axes_manager.navigation_dimension !=
self.axes_manager.navigation_dimension):
raise ValueError("mask must be a BaseSignal with the same "
"navigation_dimension as the current signal.")
def estimate_shift1D(self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
mask=None,
show_progressbar=None,
parallel=None):
"""Estimate the shifts in the current signal axis using
cross-correlation.
This method can only estimate the shift by comparing
unidimensional features that should not change the position in
the signal axis. To decrease the memory usage and the time of
computation, and to improve the accuracy of the results, it is
convenient to select the feature of interest by providing sensible values for
`start` and `end`. By default interpolation is used to obtain
subpixel precision.
Parameters
----------
start, end : int, float or None
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
as reference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
mask : `BaseSignal` of bool.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
%s
%s
Returns
-------
An array with the result of the estimation in the axis units. \
Although the computation is performed in batches if the signal is \
lazy, the result is computed in memory because it depends on the \
current state of the axes that could change later on in the workflow.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
ip = number_of_interpolation_points + 1
axis = self.axes_manager.signal_axes[0]
self._check_navigation_mask(mask)
# we compute for now
if isinstance(start, da.Array):
start = start.compute()
if isinstance(end, da.Array):
end = end.compute()
i1, i2 = axis._get_index(start), axis._get_index(end)
if reference_indices is None:
reference_indices = self.axes_manager.indices
ref = self.inav[reference_indices].data[i1:i2]
if interpolate is True:
ref = interpolate1D(ip, ref)
iterating_kwargs = ()
if mask is not None:
iterating_kwargs += (('mask', mask),)
shift_signal = self._map_iterate(
_estimate_shift1D,
iterating_kwargs=iterating_kwargs,
data_slice=slice(i1, i2),
ref=ref,
ip=ip,
interpolate=interpolate,
ragged=False,
parallel=parallel,
inplace=False,
show_progressbar=show_progressbar,)
shift_array = shift_signal.data
if max_shift is not None:
if interpolate is True:
max_shift *= ip
shift_array.clip(-max_shift, max_shift)
if interpolate is True:
shift_array = shift_array / ip
shift_array *= axis.scale
if self._lazy:
# We must compute right now because otherwise any changes to the
# axes_manager of the signal later in the workflow may result in
# a wrong shift_array
shift_array = shift_array.compute()
return shift_array
estimate_shift1D.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG)
def align1D(self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
interpolation_method='linear',
crop=True,
expand=False,
fill_value=np.nan,
also_align=None,
mask=None,
show_progressbar=None):
"""Estimate the shifts in the signal axis using
cross-correlation and use the estimation to align the data in place.
This method can only estimate the shift by comparing
unidimensional
features that should not change the position.
To decrease memory usage, time of computation and improve
accuracy it is convenient to select the feature of interest
setting the `start` and `end` keywords. By default interpolation is
used to obtain subpixel precision.
Parameters
----------
start, end : int, float or None
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
as reference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
interpolation_method : str or int
Specifies the kind of interpolation as a string ('linear',
'nearest', 'zero', 'slinear', 'quadratic, 'cubic') or as an
integer specifying the order of the spline interpolator to
use.
%s
expand : bool
If True, the data will be expanded to fit all data after alignment.
Overrides `crop`.
fill_value : float
If crop is False fill the data outside of the original
interval with the given value where needed.
also_align : list of signals, None
A list of BaseSignal instances that have exactly the same
dimensions as this one and that will be aligned using the shift map
estimated using this signal.
mask : `BaseSignal` or bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
%s
Returns
-------
An array with the result of the estimation.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
See also
--------
`estimate_shift1D`
"""
if also_align is None:
also_align = []
self._check_signal_dimension_equals_one()
if self._lazy:
_logger.warning('In order to properly expand, the lazy '
'reference signal will be read twice (once to '
'estimate shifts, and second time to shift '
'appropriatelly), which might take a long time. '
'Use expand=False to only pass through the data '
'once.')
shift_array = self.estimate_shift1D(
start=start,
end=end,
reference_indices=reference_indices,
max_shift=max_shift,
interpolate=interpolate,
number_of_interpolation_points=number_of_interpolation_points,
mask=mask,
show_progressbar=show_progressbar)
signals_to_shift = [self] + also_align
for signal in signals_to_shift:
signal.shift1D(shift_array=shift_array,
interpolation_method=interpolation_method,
crop=crop,
fill_value=fill_value,
expand=expand,
show_progressbar=show_progressbar)
align1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG)
def integrate_in_range(self, signal_range='interactive',
display=True, toolkit=None):
"""Sums the spectrum over an energy range, giving the integrated
area.
The energy range can either be selected through a GUI or the command
line.
Parameters
----------
signal_range : a tuple of this form (l, r) or "interactive"
l and r are the left and right limits of the range. They can be
numbers or None, where None indicates the extremes of the interval.
If l and r are floats the `signal_range` will be in axis units (for
example eV). If l and r are integers the `signal_range` will be in
index units. When `signal_range` is "interactive" (default) the
range is selected using a GUI.
Returns
--------
integrated_spectrum : `BaseSignal` subclass
See Also
--------
`integrate_simpson`
Examples
--------
Using the GUI
>>> s = hs.signals.Signal1D(range(1000))
>>> s.integrate_in_range() #doctest: +SKIP
Using the CLI
>>> s_int = s.integrate_in_range(signal_range=(560,None))
Selecting a range in the axis units, by specifying the
signal range with floats.
>>> s_int = s.integrate_in_range(signal_range=(560.,590.))
Selecting a range using the index, by specifying the
signal range with integers.
>>> s_int = s.integrate_in_range(signal_range=(100,120))
"""
from hyperspy.misc.utils import deprecation_warning
msg = (
"The `Signal1D.integrate_in_range` method is deprecated and will "
"be removed in v2.0. Use a `roi.SpanRoi` followed by `integrate1D` "
"instead.")
deprecation_warning(msg)
signal_range = signal_range_from_roi(signal_range)
if signal_range == 'interactive':
self_copy = self.deepcopy()
ia = IntegrateArea(self_copy, signal_range)
ia.gui(display=display, toolkit=toolkit)
integrated_signal1D = self_copy
else:
integrated_signal1D = self._integrate_in_range_commandline(
signal_range)
return integrated_signal1D
def _integrate_in_range_commandline(self, signal_range):
signal_range = signal_range_from_roi(signal_range)
e1 = signal_range[0]
e2 = signal_range[1]
integrated_signal1D = self.isig[e1:e2].integrate1D(-1)
return integrated_signal1D
def calibrate(self, display=True, toolkit=None):
"""
Calibrate the spectral dimension using a gui.
It displays a window where the new calibration can be set by:
* setting the offset, units and scale directly
* selecting a range by dragging the mouse on the spectrum figure
and setting the new values for the given range limits
Parameters
----------
%s
%s
Notes
-----
For this method to work the output_dimension must be 1.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
calibration = Signal1DCalibration(self)
return calibration.gui(display=display, toolkit=toolkit)
calibrate.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
def smooth_savitzky_golay(self,
polynomial_order=None,
window_length=None,
differential_order=0,
parallel=None, display=True, toolkit=None):
"""
Apply a Savitzky-Golay filter to the data in place.
If `polynomial_order` or `window_length` or `differential_order` are
None the method is run in interactive mode.
Parameters
----------
polynomial_order : int, optional
The order of the polynomial used to fit the samples.
`polyorder` must be less than `window_length`.
window_length : int, optional
The length of the filter window (i.e. the number of coefficients).
`window_length` must be a positive odd integer.
differential_order: int, optional
The order of the derivative to compute. This must be a
nonnegative integer. The default is 0, which means to filter
the data without differentiating.
%s
%s
%s
Notes
-----
More information about the filter in `scipy.signal.savgol_filter`.
"""
self._check_signal_dimension_equals_one()
if (polynomial_order is not None and
window_length is not None):
axis = self.axes_manager.signal_axes[0]
self.map(savgol_filter, window_length=window_length,
polyorder=polynomial_order, deriv=differential_order,
delta=axis.scale, ragged=False, parallel=parallel)
else:
# Interactive mode
smoother = SmoothingSavitzkyGolay(self)
smoother.differential_order = differential_order
if polynomial_order is not None:
smoother.polynomial_order = polynomial_order
if window_length is not None:
smoother.window_length = window_length
return smoother.gui(display=display, toolkit=toolkit)
smooth_savitzky_golay.__doc__ %= (PARALLEL_ARG, DISPLAY_DT, TOOLKIT_DT)
def smooth_lowess(self,
smoothing_parameter=None,
number_of_iterations=None,
show_progressbar=None,
parallel=None, display=True, toolkit=None):
"""
Lowess data smoothing in place.
If `smoothing_parameter` or `number_of_iterations` are None the method
is run in interactive mode.
Parameters
----------
smoothing_parameter: float or None
Between 0 and 1. The fraction of the data used
when estimating each y-value.
number_of_iterations: int or None
The number of residual-based reweightings
to perform.
%s
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
ImportError
If statsmodels is not installed.
Notes
-----
This method uses the lowess algorithm from the `statsmodels` library,
which needs to be installed to use this method.
"""
if not statsmodels_installed:
raise ImportError("statsmodels is not installed. This package is "
"required for this feature.")
self._check_signal_dimension_equals_one()
if smoothing_parameter is None or number_of_iterations is None:
smoother = SmoothingLowess(self)
if smoothing_parameter is not None:
smoother.smoothing_parameter = smoothing_parameter
if number_of_iterations is not None:
smoother.number_of_iterations = number_of_iterations
return smoother.gui(display=display, toolkit=toolkit)
else:
self.map(lowess,
exog=self.axes_manager[-1].axis,
frac=smoothing_parameter,
it=number_of_iterations,
is_sorted=True,
return_sorted=False,
show_progressbar=show_progressbar,
ragged=False,
parallel=parallel)
smooth_lowess.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, DISPLAY_DT,
TOOLKIT_DT)
def smooth_tv(self, smoothing_parameter=None, show_progressbar=None,
parallel=None, display=True, toolkit=None):
"""
Total variation data smoothing in place.
Parameters
----------
smoothing_parameter: float or None
Denoising weight relative to L2 minimization. If None the method
is run in interactive mode.
%s
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if smoothing_parameter is None:
smoother = SmoothingTV(self)
return smoother.gui(display=display, toolkit=toolkit)
else:
self.map(_tv_denoise_1d, weight=smoothing_parameter,
ragged=False,
show_progressbar=show_progressbar,
parallel=parallel)
smooth_tv.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, DISPLAY_DT,
TOOLKIT_DT)
def filter_butterworth(self,
cutoff_frequency_ratio=None,
type='low',
order=2, display=True, toolkit=None):
"""
Butterworth filter in place.
Parameters
----------
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
smoother = ButterworthFilter(self)
if cutoff_frequency_ratio is not None:
smoother.cutoff_frequency_ratio = cutoff_frequency_ratio
smoother.type = type
smoother.order = order
smoother.apply()
else:
return smoother.gui(display=display, toolkit=toolkit)
filter_butterworth.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
def _remove_background_cli(
self, signal_range, background_estimator, fast=True,
zero_fill=False, show_progressbar=None):
signal_range = signal_range_from_roi(signal_range)
from hyperspy.models.model1d import Model1D
model = Model1D(self)
model.append(background_estimator)
background_estimator.estimate_parameters(
self,
signal_range[0],
signal_range[1],
only_current=False)
if fast and not self._lazy:
try:
axis = self.axes_manager.signal_axes[0].axis
result = self - background_estimator.function_nd(axis)
except MemoryError:
result = self - model.as_signal(
show_progressbar=show_progressbar)
else:
model.set_signal_range(signal_range[0], signal_range[1])
model.multifit(show_progressbar=show_progressbar)
model.reset_signal_range()
result = self - model.as_signal(show_progressbar=show_progressbar)
if zero_fill:
if self._lazy:
low_idx = result.axes_manager[-1].value2index(signal_range[0])
z = da.zeros(low_idx, chunks=(low_idx,))
cropped_da = result.data[low_idx:]
result.data = da.concatenate([z, cropped_da])
else:
result.isig[:signal_range[0]] = 0
return result
def remove_background(
self,
signal_range='interactive',
background_type='Power Law',
polynomial_order=2,
fast=True,
zero_fill=False,
plot_remainder=True,
show_progressbar=None, display=True, toolkit=None):
"""
Remove the background, either in place using a gui or returned as a new
spectrum using the command line.
Parameters
----------
signal_range : "interactive", tuple of ints or floats, optional
If this argument is not specified, the signal range has to be
selected using a GUI, and the original spectrum will be replaced.
If a tuple is given, a new spectrum will be returned.
background_type : str
The type of component which should be used to fit the background.
Possible components: PowerLaw, Gaussian, Offset, Polynomial
If Polynomial is used, the polynomial order can be specified
polynomial_order : int, default 2
Specify the polynomial order if a Polynomial background is used.
fast : bool
If True, perform an approximative estimation of the parameters.
If False, the signal is fitted using non-linear least squares
afterwards. This is slower than the estimation but
possibly more accurate.
zero_fill : bool
If True, all spectral channels lower than the lower bound of the
fitting range will be set to zero (this is the default behavior
of Gatan's DigitalMicrograph). Setting this value to False
allows for inspection of the quality of background fit throughout
the pre-fitting region.
plot_remainder : bool
If True, add a (green) line previewing the remainder signal after
background removal. This preview is obtained from a Fast calculation
so the result may be different if a NLLS calculation is finally
performed.
%s
%s
%s
Examples
--------
Using gui, replaces spectrum s
>>> s = hs.signals.Signal1D(range(1000))
>>> s.remove_background() #doctest: +SKIP
Using command line, returns a spectrum
>>> s1 = s.remove_background(signal_range=(400,450), background_type='PowerLaw')
Using a full model to fit the background
>>> s1 = s.remove_background(signal_range=(400,450), fast=False)
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if signal_range == 'interactive':
br = BackgroundRemoval(self, background_type=background_type,
polynomial_order=polynomial_order,
fast=fast,
plot_remainder=plot_remainder,
show_progressbar=show_progressbar,
zero_fill=zero_fill)
return br.gui(display=display, toolkit=toolkit)
else:
if background_type in ('PowerLaw', 'Power Law'):
background_estimator = components1d.PowerLaw()
elif background_type == 'Gaussian':
background_estimator = components1d.Gaussian()
elif background_type == 'Offset':
background_estimator = components1d.Offset()
elif background_type == 'Polynomial':
with ignore_warning(message="The API of the `Polynomial` component"):
background_estimator = components1d.Polynomial(
polynomial_order, legacy=False)
else:
raise ValueError(
"Background type: " +
background_type +
" not recognized")
spectra = self._remove_background_cli(
signal_range=signal_range,
background_estimator=background_estimator,
fast=fast,
zero_fill=zero_fill,
show_progressbar=show_progressbar)
return spectra
remove_background.__doc__ %= (SHOW_PROGRESSBAR_ARG, DISPLAY_DT, TOOLKIT_DT)
@interactive_range_selector
def crop_signal1D(self, left_value=None, right_value=None,):
"""Crop in place the spectral dimension.
Parameters
----------
left_value, right_value : int, float or None
If int the values are taken as indices. If float they are
converted to indices using the spectral axis calibration.
If left_value is None crops from the beginning of the axis.
If right_value is None crops up to the end of the axis. If both are
None, the interactive cropping interface is activated, enabling
cropping of the spectrum using a span selector in the signal plot.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
try:
left_value, right_value = signal_range_from_roi(left_value)
except TypeError:
# It was not a ROI, we carry on
pass
self.crop(axis=self.axes_manager.signal_axes[0].index_in_axes_manager,
start=left_value, end=right_value)
def gaussian_filter(self, FWHM):
"""Applies a Gaussian filter in the spectral dimension in place.
Parameters
----------
FWHM : float
The Full Width at Half Maximum of the gaussian in the
spectral axis units
Raises
------
ValueError
If FWHM is equal or less than zero.
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if FWHM <= 0:
raise ValueError(
"FWHM must be greater than zero")
axis = self.axes_manager.signal_axes[0]
FWHM *= 1 / axis.scale
self.map(gaussian_filter1d, sigma=FWHM / 2.35482, ragged=False)
def hanning_taper(self, side='both', channels=None, offset=0):
"""Apply a hanning taper to the data in place.
Parameters
----------
side : 'left', 'right' or 'both'
Specify which side to use.
channels : None or int
The number of channels to taper. If None, 2% of the total
number of channels (with a minimum of 20) are tapered.
offset : int
Returns
-------
channels
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if not np.issubdtype(self.data.dtype, np.floating):
raise TypeError("The data dtype should be `float`. It can be "
"changed by using the `change_dtype('float')` "
"method of the signal.")
# TODO: generalize it
self._check_signal_dimension_equals_one()
if channels is None:
channels = int(round(len(self()) * 0.02))
if channels < 20:
channels = 20
dc = self._data_aligned_with_axes
if self._lazy and offset != 0:
shp = dc.shape
if len(shp) == 1:
nav_shape = ()
nav_chunks = ()
else:
nav_shape = shp[:-1]
nav_chunks = dc.chunks[:-1]
zeros = da.zeros(nav_shape + (offset,),
chunks=nav_chunks + ((offset,),))
if side == 'left' or side == 'both':
if self._lazy:
tapered = dc[..., offset:channels + offset]
tapered *= np.hanning(2 * channels)[:channels]
therest = dc[..., channels + offset:]
thelist = [] if offset == 0 else [zeros]
thelist.extend([tapered, therest])
dc = da.concatenate(thelist, axis=-1)
else:
dc[..., offset:channels + offset] *= (
np.hanning(2 * channels)[:channels])
dc[..., :offset] *= 0.
if side == 'right' or side == 'both':
rl = None if offset == 0 else -offset
if self._lazy:
therest = dc[..., :-channels - offset]
tapered = dc[..., -channels - offset:rl]
tapered *= np.hanning(2 * channels)[-channels:]
thelist = [therest, tapered]
if offset != 0:
thelist.append(zeros)
dc = da.concatenate(thelist, axis=-1)
else:
dc[..., -channels - offset:rl] *= (
np.hanning(2 * channels)[-channels:])
if offset != 0:
dc[..., -offset:] *= 0.
if self._lazy:
self.data = dc
self.events.data_changed.trigger(obj=self)
return channels
def find_peaks1D_ohaver(self, xdim=None, slope_thresh=0, amp_thresh=None,
subchannel=True, medfilt_radius=5, maxpeakn=30000,
peakgroup=10, parallel=None):
"""Find positive peaks along a 1D Signal. It detects peaks by looking
for downward zero-crossings in the first derivative that exceed
'slope_thresh'.
'slope_thresh' and 'amp_thresh' control sensitivity: higher
values will neglect broad peaks (slope) and smaller features (amp),
respectively.
`peakgroup` is the number of points around the top of the peak
that are taken to estimate the peak height. For spikes or very
narrow peaks, set `peakgroup` to 1 or 2; for broad or noisy peaks,
make `peakgroup` larger to reduce the effect of noise.
Parameters
----------
slope_thresh : float, optional
1st derivative threshold to count the peak;
higher values will neglect broader features;
default is set to 0.
amp_thresh : float, optional
intensity threshold below which peaks are ignored;
higher values will neglect smaller features;
default is set to 10%% of max(y).
medfilt_radius : int, optional
median filter window to apply to smooth the data
(see scipy.signal.medfilt);
if 0, no filter will be applied;
default is set to 5.
peakgroup : int, optional
number of points around the "top part" of the peak
that are taken to estimate the peak height;
default is set to 10
maxpeakn : int, optional
number of maximum detectable peaks;
default is set to 30000.
subchannel : bool, optional
default is set to True.
%s
Returns
-------
structured array of shape (npeaks) containing fields: 'position',
'width', and 'height' for each peak.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
# TODO: add scipy.signal.find_peaks_cwt
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0].axis
peaks = self.map(find_peaks_ohaver,
x=axis,
slope_thresh=slope_thresh,
amp_thresh=amp_thresh,
medfilt_radius=medfilt_radius,
maxpeakn=maxpeakn,
peakgroup=peakgroup,
subchannel=subchannel,
ragged=True,
parallel=parallel,
inplace=False)
return peaks.data
find_peaks1D_ohaver.__doc__ %= PARALLEL_ARG
def estimate_peak_width(self,
factor=0.5,
window=None,
return_interval=False,
parallel=None,
show_progressbar=None):
"""Estimate the width of the highest intensity of peak
of the spectra at a given fraction of its maximum.
It can be used with asymmetric peaks. For accurate results any
background must be previously subtracted.
The estimation is performed by interpolation using cubic splines.
Parameters
----------
factor : 0 < float < 1
The default, 0.5, estimates the FWHM.
window : None or float
The size of the window centred at the peak maximum
used to perform the estimation.
The window size must be chosen with care: if it is narrower
than the width of the peak at some positions or if it is
so wide that it includes other more intense peaks this
method cannot compute the width and a NaN is stored instead.
return_interval: bool
If True, returns 2 extra signals with the positions of the
desired height fraction at the left and right of the
peak.
%s
%s
Returns
-------
width or [width, left, right], depending on the value of
`return_interval`.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
if not 0 < factor < 1:
raise ValueError("factor must be between 0 and 1.")
axis = self.axes_manager.signal_axes[0]
# x = axis.axis
maxval = self.axes_manager.navigation_size
show_progressbar = show_progressbar and maxval > 0
def estimating_function(spectrum,
window=None,
factor=0.5,
axis=None):
x = axis.axis
if window is not None:
vmax = axis.index2value(spectrum.argmax())
slices = axis._get_array_slices(
slice(vmax - window * 0.5, vmax + window * 0.5))
spectrum = spectrum[slices]
x = x[slices]
spline = scipy.interpolate.UnivariateSpline(
x,
spectrum - factor * spectrum.max(),
s=0)
roots = spline.roots()
if len(roots) == 2:
return np.array(roots)
else:
return np.full((2,), np.nan)
both = self._map_iterate(estimating_function,
window=window,
factor=factor,
axis=axis,
ragged=False,
inplace=False,
parallel=parallel,
show_progressbar=show_progressbar)
left, right = both.T.split()
width = right - left
if factor == 0.5:
width.metadata.General.title = (
self.metadata.General.title + " FWHM")
left.metadata.General.title = (
self.metadata.General.title + " FWHM left position")
right.metadata.General.title = (
self.metadata.General.title + " FWHM right position")
else:
width.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum" % factor)
left.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum left position" % factor)
right.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum right position" % factor)
for signal in (left, width, right):
signal.axes_manager.set_signal_dimension(0)
signal.set_signal_type("")
if return_interval is True:
return [width, left, right]
else:
return width
estimate_peak_width.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG)
class LazySignal1D(LazySignal, Signal1D):
"""
"""
_lazy = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.axes_manager.set_signal_dimension(1)
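# ---------------------------------------------------------------------------
# Illustrative usage sketch (a hypothetical `_demo_*` helper, not part of the
# original HyperSpy module): it chains the public alignment and peak-width
# methods defined above on a small synthetic stack of shifted Gaussian
# spectra; the shapes and parameter values are arbitrary.
def _demo_align_spectra():
    x = np.arange(1024)
    shifts = np.array([0., 4., -3.])
    stack = np.array([np.exp(-0.5 * ((x - 512. - s) / 20.) ** 2)
                      for s in shifts])
    s = Signal1D(stack)                        # navigation axis of size 3
    estimated = s.estimate_shift1D()           # cross-correlation estimate
    s.align1D(crop=False, fill_value=0.)       # shift the data in place
    width = s.estimate_peak_width(factor=0.5)  # FWHM of the aligned peaks
    return estimated, width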
| gpl-3.0 |
architecture-building-systems/CEAforArcGIS | cea/utilities/solar_equations.py | 2 | 34673 |
"""
solar equations
"""
import numpy as np
import pandas as pd
import ephem
import datetime
import collections
from math import *
from timezonefinder import TimezoneFinder
import pytz
from cea.constants import HOURS_IN_YEAR
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
from cea.utilities.date import get_date_range_hours_from_year
def _ephem_setup(latitude, longitude, altitude, pressure, temperature):
# observer
obs = ephem.Observer()
obs.lat = str(latitude)
obs.lon = str(longitude)
obs.elevation = altitude
obs.pressure = pressure / 100.
obs.temp = temperature
# sun
sun = ephem.Sun()
return obs, sun
def pyephem(datetime_local, latitude, longitude, altitude=0, pressure=101325,
temperature=12):
# Written by Will Holmgren (@wholmgren), University of Arizona, 2014
try:
datetime_utc = datetime_local.tz_convert('UTC')
except ValueError:
raise ValueError('Unknown time zone from the case study.')
sun_coords = pd.DataFrame(index=datetime_local)
obs, sun = _ephem_setup(latitude, longitude, altitude,
pressure, temperature)
# make and fill lists of the sun's altitude and azimuth
# this is the pressure and temperature corrected apparent alt/az.
alts = []
azis = []
for thetime in datetime_utc:
obs.date = ephem.Date(thetime)
sun.compute(obs)
alts.append(sun.alt)
azis.append(sun.az)
sun_coords['apparent_elevation'] = alts
sun_coords['apparent_azimuth'] = azis
# redo it for p=0 to get no atmosphere alt/az
obs.pressure = 0
alts = []
azis = []
for thetime in datetime_utc:
obs.date = ephem.Date(thetime)
sun.compute(obs)
alts.append(sun.alt)
azis.append(sun.az)
sun_coords['elevation'] = alts
sun_coords['azimuth'] = azis
# convert to degrees. add zenith
sun_coords = np.rad2deg(sun_coords)
sun_coords['apparent_zenith'] = 90 - sun_coords['apparent_elevation']
sun_coords['zenith'] = 90 - sun_coords['elevation']
return sun_coords
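# ---------------------------------------------------------------------------
# Illustrative sketch (a hypothetical `_demo_*` helper, not part of the
# original CEA module): it calls pyephem() for one summer day in Zurich,
# assuming `ephem` and `pandas` are available.  The index must be tz-aware
# because pyephem() converts it to UTC with tz_convert().
def _demo_pyephem_sun_path():
    hours = pd.date_range('2011-06-21', periods=24, freq='H', tz='Etc/GMT-1')
    coords = pyephem(hours, latitude=47.37, longitude=8.55)
    # 'zenith' and 'azimuth' are in degrees after the np.rad2deg call above.
    return coords[['zenith', 'azimuth']]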
# solar properties
SunProperties = collections.namedtuple('SunProperties', ['g', 'Sz', 'Az', 'ha', 'trr_mean', 'worst_sh', 'worst_Az'])
def calc_datetime_local_from_weather_file(weather_data, latitude, longitude):
# read date from the weather file
year = weather_data['year'][0]
datetime = get_date_range_hours_from_year(year)
# get local time zone
etc_timezone = get_local_etc_timezone(latitude, longitude)
# convert to local time zone
datetime_local = datetime.tz_localize(tz=etc_timezone)
return datetime_local
def get_local_etc_timezone(latitude, longitude):
'''
This function gets the time zone at a given latitude and longitude in 'Etc/GMT' format.
This time zone format is used in order to avoid issues caused by Daylight Saving Time (DST) (i.e., redundant or
missing times in regions that use DST).
However, note that 'Etc/GMT' uses a counter intuitive sign convention, where West of GMT is POSITIVE, not negative.
So, for example, the time zone for Zurich will be returned as 'Etc/GMT-1'.
:param latitude: Latitude at the project location
:param longitude: Longitude at the project location
'''
# get the time zone at the given coordinates
tf = TimezoneFinder()
time = pytz.timezone(tf.timezone_at(lng=longitude, lat=latitude)).localize(
datetime.datetime(2011, 1, 1)).strftime('%z')
# invert sign and return in 'Etc/GMT' format
if time[0] == '-':
time_zone = 'Etc/GMT+' + time[2]
else:
time_zone = 'Etc/GMT-' + time[2]
return time_zone
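# ---------------------------------------------------------------------------
# Illustrative sketch (a hypothetical `_demo_*` helper, not part of the
# original CEA module): it shows the inverted 'Etc/GMT' sign convention
# described in the docstring above -- Zurich (UTC+1 in winter) maps to
# 'Etc/GMT-1'.  It assumes `timezonefinder` and `pytz` are installed.
def _demo_etc_timezone():
    tz = get_local_etc_timezone(latitude=47.37, longitude=8.55)
    assert tz == 'Etc/GMT-1'
    return tz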
def calc_sun_properties(latitude, longitude, weather_data, datetime_local, config):
solar_window_solstice = config.solar.solar_window_solstice
hour_date = datetime_local.hour
min_date = datetime_local.minute
day_date = datetime_local.dayofyear
worst_hour = calc_worst_hour(latitude, weather_data, solar_window_solstice)
# solar elevation, azimuth and values for the 9-3pm period of no shading on the solar solstice
sun_coords = pyephem(datetime_local, latitude, longitude)
sun_coords['declination'] = np.vectorize(declination_degree)(day_date, 365)
sun_coords['hour_angle'] = np.vectorize(get_hour_angle)(longitude, min_date, hour_date, day_date)
worst_sh = sun_coords['elevation'].loc[datetime_local[worst_hour]]
worst_Az = sun_coords['azimuth'].loc[datetime_local[worst_hour]]
# mean transmissivity
weather_data['diff'] = weather_data.difhorrad_Whm2 / weather_data.glohorrad_Whm2
T_G_hour = weather_data[np.isfinite(weather_data['diff'])]
T_G_day = np.round(T_G_hour.groupby(['dayofyear']).mean(), 2)
T_G_day['diff'] = T_G_day['diff'].replace(1, 0.90)
transmittivity = (1 - T_G_day['diff']).mean()
return SunProperties(g=sun_coords['declination'], Sz=sun_coords['zenith'], Az=sun_coords['azimuth'],
ha=sun_coords['hour_angle'], trr_mean=transmittivity, worst_sh=worst_sh, worst_Az=worst_Az)
def calc_sunrise(sunrise, Yearsimul, longitude, latitude):
o, s = _ephem_setup(latitude, longitude, altitude=0, pressure=101325, temperature=12)
for day in range(1, 366): # Calculated according to NOAA website
o.date = datetime.datetime(Yearsimul, 1, 1) + datetime.timedelta(day - 1)
next_event = o.next_rising(s)
sunrise[day - 1] = next_event.datetime().hour
return sunrise
def declination_degree(day_date, TY):
"""The declination of the sun is the angle between Earth's equatorial plane and a line
between the Earth and the sun. It varies between 23.45 degrees and -23.45 degrees,
hitting zero on the equinoxes and peaking on the solstices. [1]_
:param day_date: int, day of the year for which to do the calculation
:param TY: float, total number of days in a year, e.g. 365 days per year (no leap days)
:return DEC: float, the declination of the Sun in degrees
.. [1] http://pysolar.org/
"""
return 23.45 * np.vectorize(sin)((2 * pi / (TY)) * (day_date - 81))
def get_hour_angle(longitude_deg, min_date, hour_date, day_date):
solar_time = get_solar_time(longitude_deg, min_date, hour_date, day_date)
return 15 * (12 - solar_time)
def get_solar_time(longitude_deg, min_date, hour_date, day_date):
"""
returns solar time in hours for the specified longitude and time,
accurate only to the nearest minute.
longitude_deg
min_date
hour_date
day_date
"""
solar_time_min = hour_date * 60 + min_date + 4 * longitude_deg + get_equation_of_time(day_date)
return solar_time_min / 60
def get_equation_of_time(day_date):
B = (day_date - 1) * 360 / 365
E = 229.2 * (0.000075 + 0.001868 * cos(B) - 0.032077 * sin(B) - 0.014615 * cos(2 * B) - 0.04089 * sin(2 * B))
return E
# filter sensor points with low solar potential
def filter_low_potential(radiation_json_path, metadata_csv_path, config):
"""
To filter the sensor points/hours with low radiation potential.
#. keep sensors above min radiation
#. eliminate points when hourly production < 50 W/m2
#. augment the solar radiation due to differences between panel reflectance and original reflectances used in daysim
    :param radiation_json_path: path to the solar insolation data on all surfaces of each building
    :type radiation_json_path: str (path to .json)
    :param metadata_csv_path: path to the solar insolation sensor metadata of each building
    :type metadata_csv_path: str (path to .csv)
:return max_annual_radiation: yearly horizontal radiation [Wh/m2/year]
:rtype max_annual_radiation: float
:return annual_radiation_threshold: minimum yearly radiation threshold for sensor selection [Wh/m2/year]
:rtype annual_radiation_threshold: float
:return sensors_rad_clean: radiation data of the filtered sensors [Wh/m2]
:rtype sensors_rad_clean: dataframe
    :return sensors_metadata_clean: data of filtered sensor points measuring solar insolation of each building
:rtype sensors_metadata_clean: dataframe
Following assumptions are made:
#. Sensor points with low yearly radiation are deleted. The threshold (minimum yearly radiation) is a percentage
of global horizontal radiation. The percentage threshold (min_radiation) is a global variable defined by users.
#. For each sensor point kept, the radiation value is set to zero when radiation value is below 50 W/m2.
#. No solar panels on windows.
"""
def f(x):
if x <= 50:
return 0
else:
return x
# read radiation file
sensors_rad = pd.read_json(radiation_json_path)
sensors_metadata = pd.read_csv(metadata_csv_path)
# join total radiation to sensor_metadata
sensors_rad_sum = sensors_rad.sum(0).to_frame('total_rad_Whm2') # add new row with yearly radiation
sensors_metadata.set_index('SURFACE', inplace=True)
sensors_metadata = sensors_metadata.merge(sensors_rad_sum, left_index=True, right_index=True) # [Wh/m2]
# remove window surfaces
sensors_metadata = sensors_metadata[sensors_metadata.TYPE != 'windows']
# keep sensors if allow pv installation on walls or on roofs
if config.solar.panel_on_roof is False:
sensors_metadata = sensors_metadata[sensors_metadata.TYPE != 'roofs']
if config.solar.panel_on_wall is False:
sensors_metadata = sensors_metadata[sensors_metadata.TYPE != 'walls']
# set min yearly radiation threshold for sensor selection
# keep sensors above min production in sensors_rad
max_annual_radiation = sensors_rad_sum.max().values[0]
annual_radiation_threshold_Whperm2 = float(config.solar.annual_radiation_threshold)*1000
sensors_metadata_clean = sensors_metadata[sensors_metadata.total_rad_Whm2 >= annual_radiation_threshold_Whperm2]
sensors_rad_clean = sensors_rad[sensors_metadata_clean.index.tolist()] # keep sensors above min radiation
sensors_rad_clean = sensors_rad_clean.applymap(lambda x: f(x))
return max_annual_radiation, annual_radiation_threshold_Whperm2, sensors_rad_clean, sensors_metadata_clean
# custom rooftop solar panel tilt angle
def calc_spacing_custom_angle(sensors_metadata_clean, solar_properties, max_rad_Whperm2yr, panel_properties,
panel_tilt_angle, roof_coverage):
"""
This function first determines the row spacing and surface azimuth of panels at a custom tilt angle installed at
each sensor point. Secondly, the installed PV module areas at each sensor point are calculated. Lastly, all the
modules are categorized by their surface azimuth, tilt angle, and yearly radiation. The output will then be used to
calculate the absorbed radiation.
    :param sensors_metadata_clean: data of filtered sensor points measuring solar insolation of each building
:type sensors_metadata_clean: dataframe
:param solar_properties: A SunProperties, using worst_sh: solar elevation at the worst hour [degree], worst_Az: solar azimuth at the worst hour [degree]
and trr_mean: transmissivity / clearness index [-]
:type solar_properties: cea.utilities.solar_equations.SunProperties
:param max_rad_Whperm2yr: max radiation received on surfaces [Wh/m2/year]
:type max_rad_Whperm2yr: float
:param panel_properties: Properties of the PV/PVT/SC panels selected (from systems database)
:type panel_properties: dict
:param panel_tilt_angle: custom panel tilt angle to be used for the spacing calculation
:type panel_tilt_angle: float
:param roof_coverage: Maximum panel coverage of roof surfaces that reach minimum irradiation threshold. e.g., if 0.8 is selected, only 80% of the areas that reach the minimum irradiation threshold will be covered by PV.
:type roof_coverage: float
:returns sensors_metadata_clean: data of filtered sensor points categorized with module tilt angle, array spacing,
surface azimuth, installed PV module area of each sensor point and the categories
:rtype sensors_metadata_clean: dataframe
Assumptions:
#. Row spacing: Determine the row spacing by minimizing the shadow according to the solar elevation and azimuth at
the worst hour of the year. The worst hour is a global variable defined by users.
#. Surface azimuth (orientation) of panels: If the sensor is on a tilted roof, the orientation of the panel is the
same as the roof. Sensors on flat roofs are all south facing.
"""
# calculate panel tilt angle (B) for flat roofs (tilt < 5 degrees), slope roofs and walls.
input_angle_rad = radians(panel_tilt_angle)
sensors_metadata_clean['tilt_deg'] = np.vectorize(acos)(sensors_metadata_clean['Zdir']) # surface tilt angle in rad
sensors_metadata_clean['tilt_deg'] = np.vectorize(degrees)(sensors_metadata_clean['tilt_deg']) # surface tilt angle in degrees
sensors_metadata_clean['B_deg'] = np.where(sensors_metadata_clean['tilt_deg'] >= 5,
sensors_metadata_clean['tilt_deg'],
degrees(input_angle_rad)) # panel tilt angle in degrees
# calculate spacing and surface azimuth of the panels for flat roofs
module_length_m = panel_properties['module_length_m']
optimal_spacing_flat_m = calc_optimal_spacing(solar_properties, input_angle_rad, module_length_m)
sensors_metadata_clean['array_spacing_m'] = np.where(sensors_metadata_clean['tilt_deg'] >= 5, 0,
optimal_spacing_flat_m)
sensors_metadata_clean['surface_azimuth_deg'] = np.vectorize(calc_surface_azimuth)(sensors_metadata_clean['Xdir'],
sensors_metadata_clean['Ydir'],
sensors_metadata_clean[
'B_deg']) # degrees
# calculate the surface area required to install one pv panel on flat roofs with defined tilt angle and array spacing
if panel_properties['type'] == 'PV':
module_width_m = module_length_m # for PV
else:
module_width_m = panel_properties['module_area_m2'] / module_length_m # for FP, ET
module_flat_surface_area_m2 = module_width_m * (sensors_metadata_clean.array_spacing_m / 2 +
module_length_m * cos(input_angle_rad))
area_per_module_m2 = module_width_m * module_length_m
# calculate the pv/solar collector module area within the area of each sensor point
sensors_metadata_clean['area_installed_module_m2'] = np.where(
sensors_metadata_clean['tilt_deg'] >= 5, sensors_metadata_clean.AREA_m2,
area_per_module_m2 * (roof_coverage * sensors_metadata_clean.AREA_m2 / module_flat_surface_area_m2))
# categorize the sensors by surface_azimuth, B, GB
result = np.vectorize(calc_categoriesroof)(sensors_metadata_clean.surface_azimuth_deg, sensors_metadata_clean.B_deg,
sensors_metadata_clean.total_rad_Whm2, max_rad_Whperm2yr)
sensors_metadata_clean['CATteta_z'] = result[0]
sensors_metadata_clean['CATB'] = result[1]
sensors_metadata_clean['CATGB'] = result[2]
return sensors_metadata_clean
# optimal tilt angle and spacing of solar panels
def optimal_angle_and_tilt(sensors_metadata_clean, latitude, solar_properties, max_rad_Whperm2yr, panel_properties,
roof_coverage):
"""
This function first determines the optimal tilt angle, row spacing and surface azimuth of panels installed at each
sensor point. Secondly, the installed PV module areas at each sensor point are calculated. Lastly, all the modules
are categorized with its surface azimuth, tilt angle, and yearly radiation. The output will then be used to
calculate the absorbed radiation.
    :param sensors_metadata_clean: data of filtered sensor points measuring solar insolation of each building
:type sensors_metadata_clean: dataframe
:param latitude: latitude of the case study location
:type latitude: float
:param solar_properties: A SunProperties, using worst_sh: solar elevation at the worst hour [degree],
worst_Az: solar azimuth at the worst hour [degree]
and trr_mean: transmissivity / clearness index [-]
:type solar_properties: cea.utilities.solar_equations.SunProperties
:param max_rad_Whperm2yr: max radiation received on surfaces [Wh/m2/year]
:type max_rad_Whperm2yr: float
:param panel_properties: Properties of the PV/PVT/SC panels selected (from systems database)
:type panel_properties: dict
:param roof_coverage: Maximum panel coverage of roof surfaces that reach minimum irradiation threshold. e.g., if 0.8 is selected, only 80% of the areas that reach the minimum irradiation threshold will be covered by PV.
:type roof_coverage: float
:returns sensors_metadata_clean: data of filtered sensor points categorized with module tilt angle, array spacing,
surface azimuth, installed PV module area of each sensor point and the categories
:rtype sensors_metadata_clean: dataframe
Assumptions:
#. Tilt angle: If the sensor is on tilted roof, the panel will have the same tilt as the roof. If the sensor is on
a wall, the tilt angle is 90 degree. Tilt angles for flat roof is determined using the method from Quinn et al.
#. Row spacing: Determine the row spacing by minimizing the shadow according to the solar elevation and azimuth at
the worst hour of the year. The worst hour is a global variable defined by users.
#. Surface azimuth (orientation) of panels: If the sensor is on a tilted roof, the orientation of the panel is the
same as the roof. Sensors on flat roofs are all south facing.
"""
# calculate panel tilt angle (B) for flat roofs (tilt < 5 degrees), slope roofs and walls.
optimal_angle_flat_rad = calc_optimal_angle(180, latitude,
solar_properties.trr_mean) # assume surface azimuth = 180 (N,E), south facing
sensors_metadata_clean['tilt_deg'] = np.vectorize(acos)(sensors_metadata_clean['Zdir']) # surface tilt angle in rad
sensors_metadata_clean['tilt_deg'] = np.vectorize(degrees)(
sensors_metadata_clean['tilt_deg']) # surface tilt angle in degrees
sensors_metadata_clean['B_deg'] = np.where(sensors_metadata_clean['tilt_deg'] >= 5,
sensors_metadata_clean['tilt_deg'],
degrees(optimal_angle_flat_rad)) # panel tilt angle in degrees
# calculate spacing and surface azimuth of the panels for flat roofs
module_length_m = panel_properties['module_length_m']
optimal_spacing_flat_m = calc_optimal_spacing(solar_properties, optimal_angle_flat_rad, module_length_m)
sensors_metadata_clean['array_spacing_m'] = np.where(sensors_metadata_clean['tilt_deg'] >= 5, 0,
optimal_spacing_flat_m)
sensors_metadata_clean['surface_azimuth_deg'] = np.vectorize(calc_surface_azimuth)(sensors_metadata_clean['Xdir'],
sensors_metadata_clean['Ydir'],
sensors_metadata_clean[
'B_deg']) # degrees
# calculate the surface area required to install one pv panel on flat roofs with defined tilt angle and array spacing
if panel_properties['type'] == 'PV':
module_width_m = module_length_m # for PV
else:
module_width_m = panel_properties['module_area_m2'] / module_length_m # for FP, ET
module_flat_surface_area_m2 = module_width_m * (sensors_metadata_clean.array_spacing_m / 2 +
module_length_m * cos(optimal_angle_flat_rad))
area_per_module_m2 = module_width_m * module_length_m
# calculate the pv/solar collector module area within the area of each sensor point
sensors_metadata_clean['area_installed_module_m2'] = np.where(sensors_metadata_clean['tilt_deg'] >= 5,
sensors_metadata_clean.AREA_m2,
roof_coverage * area_per_module_m2 *
(sensors_metadata_clean.AREA_m2 /
module_flat_surface_area_m2))
# categorize the sensors by surface_azimuth, B, GB
result = np.vectorize(calc_categoriesroof)(sensors_metadata_clean.surface_azimuth_deg, sensors_metadata_clean.B_deg,
sensors_metadata_clean.total_rad_Whm2, max_rad_Whperm2yr)
sensors_metadata_clean['CATteta_z'] = result[0]
sensors_metadata_clean['CATB'] = result[1]
sensors_metadata_clean['CATGB'] = result[2]
return sensors_metadata_clean
def calc_optimal_angle(teta_z, latitude, transmissivity):
"""
To calculate the optimal tilt angle of the solar panels.
    :param teta_z: surface azimuth [degree]; in this module 0 degrees is north with east positive, so 180 degrees denotes a south-facing surface
:type teta_z: float
:param latitude: latitude of the case study site
:type latitude: float
:param transmissivity: clearness index [-]
:type transmissivity: float
:return abs(b): optimal tilt angle [radians]
:rtype abs(b): float
..[Quinn et al., 2013] S.W.Quinn, B.Lehman.A simple formula for estimating the optimum tilt angles of photovoltaic
panels. 2013 IEEE 14th Work Control Model Electron, Jun, 2013, pp.1-8
"""
if transmissivity <= 0.15:
gKt = 0.977
elif 0.15 < transmissivity <= 0.7:
gKt = 1.237 - 1.361 * transmissivity
else:
gKt = 0.273
Tad = 0.98 # transmittance-absorptance product of the diffuse radiation
Tar = 0.97 # transmittance-absorptance product of the reflected radiation
Pg = 0.2 # ground reflectance of 0.2
l = radians(latitude)
a = radians(teta_z)
b = atan((cos(a) * tan(l)) * (1 / (1 + ((Tad * gKt - Tar * Pg) / (2 * (1 - gKt)))))) # eq.(11)
return abs(b)
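# --- Editor's illustrative sketch (not part of the original module) ---
# Rough sanity check of eq.(11) above for a hypothetical south-facing flat roof
# (teta_z = 180 in this module's convention) at latitude 47 deg N with a
# clearness index of 0.6. The numbers are approximate and this helper is never
# called by the module.
def _example_calc_optimal_angle():
    from math import degrees
    b_rad = calc_optimal_angle(teta_z=180, latitude=47, transmissivity=0.6)
    return round(degrees(b_rad), 1)  # roughly 42 degrees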
def calc_optimal_spacing(sun_properties, tilt_angle, module_length):
"""
To calculate the optimal spacing between each panel to avoid shading.
:param sun_properties: SunProperties, using worst_sh (Solar elevation at the worst hour [degree]) and worst_Az
(Solar Azimuth [degree] at the worst hour)
:type sun_properties: SunProperties
    :param tilt_angle: optimal tilt angle for panels on flat surfaces [radians]
:type tilt_angle: float
:param module_length: [m]
:type module_length: float
:return D: optimal distance in [m]
:rtype D: float
"""
h = module_length * sin(tilt_angle)
D1 = h / tan(radians(sun_properties.worst_sh))
D = max(D1 * cos(radians(180 - sun_properties.worst_Az)), D1 * cos(radians(sun_properties.worst_Az - 180)))
return D
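# --- Editor's illustrative sketch (not part of the original module) ---
# Example row-spacing calculation, assuming a hypothetical worst-hour sun
# position (elevation 20 deg, azimuth 160 deg), a 30 deg panel tilt (passed in
# radians, as the callers above do) and a 1.6 m module. Values are approximate
# and this helper is never called by the module.
def _example_calc_optimal_spacing():
    from math import radians
    sun = SunProperties(g=None, Sz=None, Az=None, ha=None, trr_mean=None,
                        worst_sh=20.0, worst_Az=160.0)
    return calc_optimal_spacing(sun, tilt_angle=radians(30), module_length=1.6)  # roughly 2.1 m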
def calc_categoriesroof(teta_z, B, GB, Max_Isol):
"""
To categorize solar panels by the surface azimuth, tilt angle and yearly radiation.
:param teta_z: surface azimuth [degree], 0 degree north (east positive, west negative)
:type teta_z: float
    :param B: solar panel tilt angle [degree]
:type B: float
:param GB: yearly radiation of sensors [Wh/m2/year]
:type GB: float
:param Max_Isol: maximum radiation received on surfaces [Wh/m2/year]
:type Max_Isol: float
:return CATteta_z: category of surface azimuth
:rtype CATteta_z: float
:return CATB: category of tilt angle
:rtype CATB: float
:return CATBG: category of yearly radiation
:rtype CATBG: float
"""
if -122.5 < teta_z <= -67:
CATteta_z = 1
elif -67 < teta_z <= -22.5:
CATteta_z = 3
elif -22.5 < teta_z <= 22.5:
CATteta_z = 5
elif 22.5 < teta_z <= 67:
CATteta_z = 4
elif 67 <= teta_z <= 122.5:
CATteta_z = 2
else:
CATteta_z = 6
B = degrees(B)
if 0 < B <= 5:
CATB = 1 # flat roof
elif 5 < B <= 15:
CATB = 2 # tilted 5-15 degrees
elif 15 < B <= 25:
CATB = 3 # tilted 15-25 degrees
elif 25 < B <= 40:
CATB = 4 # tilted 25-40 degrees
elif 40 < B <= 60:
CATB = 5 # tilted 40-60 degrees
elif B > 60:
CATB = 6 # tilted >60 degrees
else:
CATB = None
print('B not in expected range')
GB_percent = GB / Max_Isol
# if 0 < GB_percent <= 0.05:
# CATGB = 1
# elif 0.05 < GB_percent <= 0.1:
# CATGB = 2
# elif 0.1 < GB_percent <= 0.15:
# CATGB = 3
# elif 0.15 < GB_percent <= 0.2:
# CATGB = 4
# elif 0.2 < GB_percent <= 0.25:
# CATGB = 5
# elif 0.25 < GB_percent <= 0.3:
# CATGB = 6
# elif 0.3 < GB_percent <= 0.35:
# CATGB = 7
# elif 0.35 < GB_percent <= 0.4:
# CATGB = 8
# elif 0.4 < GB_percent<= 0.45:
# CATGB = 9
# elif 0.45 < GB_percent <= 0.5:
# CATGB = 10
# elif 0.5 < GB_percent <= 0.55:
# CATGB = 11
# elif 0.55 < GB_percent <= 0.6:
# CATGB = 12
# elif 0.6 < GB_percent <= 0.65:
# CATGB = 13
# elif 0.65 < GB_percent <= 0.7:
# CATGB = 14
# elif 0.7 < GB_percent <= 0.75:
# CATGB = 15
# elif 0.75 < GB_percent <= 0.8:
# CATGB = 16
# elif 0.8 < GB_percent <= 0.85:
# CATGB = 17
# elif 0.85 < GB_percent <= 0.9:
# CATGB = 18
# elif 0.9 < GB_percent <= 0.95:
# CATGB = 19
# elif 0.95 < GB_percent <= 1:
# CATGB = 20
# else:
# CATGB = None
# print('GB not in expected range')
if 0 < GB_percent <= 0.1:
CATGB = 1
elif 0.1 < GB_percent <= 0.2:
CATGB = 2
elif 0.2 < GB_percent <= 0.3:
CATGB = 3
elif 0.3 < GB_percent <= 0.4:
CATGB = 4
elif 0.4 < GB_percent<= 0.5:
CATGB = 5
elif 0.5 < GB_percent <= 0.6:
CATGB = 6
elif 0.6 < GB_percent <= 0.7:
CATGB = 7
elif 0.7 < GB_percent <= 0.8:
CATGB = 8
elif 0.8 < GB_percent <= 0.9:
CATGB = 9
elif 0.90 < GB_percent <= 1:
CATGB = 10
else:
CATGB = None
print('GB not in expected range')
return CATteta_z, CATB, CATGB
def calc_surface_azimuth(xdir, ydir, B):
"""
Calculate surface azimuth from the surface normal vector (x,y,z) and tilt angle (B).
    Following the geographical sign convention, an azimuth of 0 or 360 degrees represents north and 90 degrees is east.
:param xdir: surface normal vector x in (x,y,z) representing east-west direction
:param ydir: surface normal vector y in (x,y,z) representing north-south direction
:param B: surface tilt angle in degree
:type xdir: float
:type ydir: float
:type B: float
:returns surface azimuth: the azimuth of the surface of a solar panel in degree
:rtype surface_azimuth: float
"""
B = radians(B)
teta_z = degrees(asin(xdir / sin(B)))
    # set the surface azimuth based on the sign convention (E,N) = (+,+)
if xdir < 0:
if ydir < 0:
surface_azimuth = 180 + teta_z # (xdir,ydir) = (-,-)
else:
surface_azimuth = 360 + teta_z # (xdir,ydir) = (-,+)
elif ydir < 0:
surface_azimuth = 180 + teta_z # (xdir,ydir) = (+,-)
else:
surface_azimuth = teta_z # (xdir,ydir) = (+,+)
return surface_azimuth # degree
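# --- Editor's illustrative sketch (not part of the original module) ---
# Example of the sign convention documented above: a panel tilted 30 deg whose
# unit normal points due east has (Xdir, Ydir) = (sin(30 deg), 0), which maps to
# a surface azimuth of about 90 deg (east). This helper is never called.
def _example_calc_surface_azimuth():
    from math import sin, radians
    return calc_surface_azimuth(xdir=sin(radians(30)), ydir=0.0, B=30.0)  # ~90.0 (due east)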
# calculate angle of incidence
def calc_incident_angle_beam(g, lat, ha, tilt, teta_z):
    # calculate the incidence angle of beam radiation
part1 = sin(lat) * sin(g) * cos(tilt) - cos(lat) * sin(g) * sin(tilt) * cos(teta_z)
part2 = cos(lat) * cos(g) * cos(ha) * cos(tilt) + sin(lat) * cos(g) * cos(ha) * sin(tilt) * cos(teta_z)
part3 = cos(g) * sin(ha) * sin(tilt) * sin(teta_z)
teta_B = acos(part1 + part2 + part3)
    return teta_B  # in radians
def calc_angle_of_incidence(g, lat, ha, tilt, teta_z):
"""
To calculate angle of incidence from solar vector and surface normal vector.
    (Validated with Sandia pvlib.irradiance.aoi)
    :param lat: latitude of the location of the case study [radians]
:param g: declination of the solar position [radians]
:param ha: hour angle [radians]
:param tilt: panel surface tilt angle [radians]
:param teta_z: panel surface azimuth angle [radians]
:type lat: float
:type g: float
:type ha: float
:type tilt: float
:type teta_z: float
:return teta_B: angle of incidence [radians]
:rtype teta_B: float
    .. [Sproul, A. B., 2007] Sproul, A.B. (2007). Derivation of the solar geometric relationships using vector analysis.
Renewable Energy, 32(7), 1187-1205.
"""
# surface normal vector
n_E = sin(tilt) * sin(teta_z)
n_N = sin(tilt) * cos(teta_z)
n_Z = cos(tilt)
# solar vector
s_E = -cos(g) * sin(ha)
s_N = sin(g) * cos(lat) - cos(g) * sin(lat) * cos(ha)
s_Z = cos(g) * cos(lat) * cos(ha) + sin(g) * sin(lat)
# angle of incidence
teta_B = acos(n_E * s_E + n_N * s_N + n_Z * s_Z)
return teta_B
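# --- Editor's illustrative sketch (not part of the original module) ---
# Sanity check of the vector formulation above: for a horizontal surface
# (tilt = 0) the angle of incidence reduces to the solar zenith angle, and with
# declination 0 and hour angle 0 that zenith angle equals the latitude.
# This helper is for illustration only and is never called by the module.
def _example_calc_angle_of_incidence():
    from math import radians, degrees
    aoi = calc_angle_of_incidence(g=0.0, lat=radians(47), ha=0.0, tilt=0.0, teta_z=0.0)
    return round(degrees(aoi), 1)  # roughly 47.0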
# calculate sensor properties in each group
def calc_groups(radiation_of_sensors_clean, sensors_metadata_cat):
"""
To calculate the mean hourly radiation of sensors in each group.
:param radiation_of_sensors_clean: radiation data of the filtered sensors
:type radiation_of_sensors_clean: dataframe
:param sensors_metadata_cat: data of filtered sensor points categorized with module tilt angle, array spacing,
surface azimuth, installed PV module area of each sensor point
:type sensors_metadata_cat: dataframe
:return number_groups: number of groups of sensor points
:rtype number_groups: float
:return hourlydata_groups: mean hourly radiation of sensors in each group
:rtype hourlydata_groups: dataframe
:return number_points: number of sensor points in each group
:rtype number_points: array
:return prop_observers: values of sensor properties of each group of sensors
:rtype prop_observers: dataframe
"""
# calculate number of groups as number of optimal combinations.
sensors_metadata_cat['type_orientation'] = sensors_metadata_cat['TYPE'] + '_' + sensors_metadata_cat['orientation']
sensors_metadata_cat['surface'] = sensors_metadata_cat.index
sensor_groups_ob = sensors_metadata_cat.groupby(
['CATB', 'CATGB', 'CATteta_z', 'type_orientation']) # group the sensors by categories
number_groups = sensor_groups_ob.size().count() # TODO: check if redundant, it is actually equal to group_count
group_keys = sensor_groups_ob.groups.keys()
# empty dicts to store results
group_properties = {}
group_mean_radiations = {}
number_points = {}
group_count = 0
for key in group_keys:
# get surface names in group
surfaces_in_group = sensor_groups_ob['surface'].groups[key].values
number_points[group_count] = len(surfaces_in_group)
# write group properties
group_key = pd.Series({'CATB': key[0], 'CATGB': key[1], 'CATteta_z': key[2], 'type_orientation': key[3]})
group_info = pd.Series({'number_srfs': number_points, 'srfs': (''.join(surfaces_in_group))})
group_prop_sum = sensor_groups_ob.sum().loc[key,:][['AREA_m2','area_installed_module_m2']]
group_prop_mean = sensor_groups_ob.mean().loc[key,:].drop(['area_installed_module_m2', 'AREA_m2'])
group_properties[group_count] = group_key.append(group_prop_mean).append(group_prop_sum).append(group_info)
# calculate mean radiation among surfaces in group
group_mean_radiations[group_count] = radiation_of_sensors_clean[surfaces_in_group].mean(axis=1).values
group_count += 1
prop_observers = pd.DataFrame(group_properties).T
hourlydata_groups = pd.DataFrame(group_mean_radiations)
panel_groups = {'number_groups': number_groups, 'number_points': number_points,
'hourlydata_groups': hourlydata_groups, 'prop_observers': prop_observers}
return panel_groups
# calculate the worst hour
def calc_worst_hour(latitude, weather_data, solar_window_solstice):
"""
Calculate the first hour of solar window of the winter solstice for panel spacing.
http://www.affordable-solar.com/learning-center/building-a-system/calculating-tilted-array-spacing/
:param latitude: latitude of the site [degree]
:type latitude: float
:param weather_data: weather data of the site
:type weather_data: pd.dataframe
:param solar_window_solstice: the desired hour of shade-free solar window on the winter solstice.
    :type solar_window_solstice: float
:return worst_hour: the hour to calculate minimum spacing
:rtype worst_hour: float
"""
if latitude > 0:
northern_solstice = weather_data.query('month == 12 & day == 21')
worst_hour = northern_solstice[northern_solstice.hour == (12 - round(solar_window_solstice / 2))].index[0]
else:
southern_solstice = weather_data.query('month == 6 & day == 21')
worst_hour = southern_solstice[southern_solstice.hour == (12 - round(solar_window_solstice / 2))].index[0]
return worst_hour
def cal_radiation_type(group, hourly_radiation, weather_data):
radiation_Wperm2 = pd.DataFrame({'I_sol': hourly_radiation[group]})
radiation_Wperm2['I_diffuse'] = weather_data.ratio_diffhout * radiation_Wperm2.I_sol # calculate diffuse radiation
radiation_Wperm2['I_direct'] = radiation_Wperm2['I_sol'] - radiation_Wperm2[
'I_diffuse'] # calculate direct radiation
radiation_Wperm2.fillna(0, inplace=True) # set nan to zero
return radiation_Wperm2
|
mit
|
jm-begon/scikit-learn
|
examples/applications/face_recognition.py
|
191
|
5513
|
"""
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the most represented people in the dataset::
precision recall f1-score support
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
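# --- Editor's note (illustrative, not part of the original example) ---
# This script targets an older scikit-learn release. On recent versions the
# deprecated modules imported above have moved; a roughly equivalent set of
# imports would be (adjust to the installed version):
#
#     from sklearn.model_selection import train_test_split, GridSearchCV
#     from sklearn.decomposition import PCA  # PCA(..., whiten=True, svd_solver='randomized') replaces RandomizedPCA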
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the data directly (relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significant eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
|
bsd-3-clause
|
cpadavis/SpaceWarps
|
analysis/make_info_plots.py
|
2
|
10954
|
#!/usr/bin/env python
from subprocess import call
import sys,getopt,numpy as np
import matplotlib
from pylab import *;
import swap
def make_info_plots(argv):
"""
NAME
make_info_plots
PURPOSE
        Given stage1 and stage2 bureau pickles, this script produces
        several plots for the crowd analysis.
COMMENTS
FLAGS
-h Print this message
INPUTS
stage1_bureau.pickle
stage2_bureau.pickle
OUTPUTS
Various png plots.
    EXAMPLE
        python make_info_plots.py stage1_bureau.pickle stage2_bureau.pickle
BUGS
- Code is not tested yet...
AUTHORS
This file is part of the Space Warps project, and is distributed
under the MIT license by the Space Warps Science Team.
http://spacewarps.org/
HISTORY
2014-06-27 started More & More (Kavli IPMU)
"""
# ------------------------------------------------------------------
try:
opts, args = getopt.getopt(argv,"h",["help"])
except getopt.GetoptError, err:
print str(err) # will print something like "option -a not recognized"
print make_info_plots.__doc__ # will print the big comment above.
return
for o,a in opts:
if o in ("-h", "--help"):
print make_info_plots.__doc__
return
else:
assert False, "unhandled option"
# Check for pickles in array args:
if len(args) == 2:
bureau1_path = args[0]
bureau2_path = args[1]
print "make_info_plots: illustrating behaviour captured in bureau files: "
print "make_info_plots: ",bureau1_path
print "make_info_plots: ",bureau2_path
else:
print make_info_plots.__doc__
return
# Read in bureau objects:
bureau1 = swap.read_pickle(bureau1_path, 'bureau')
bureau2 = swap.read_pickle(bureau2_path, 'bureau')
print "make_info_plots: stage 1, 2 agent numbers: ",len(bureau1.list()), len(bureau2.list())
experience1 = []
effort1 = []
final_skill1 = []
final_PL1 =[]
final_PD1 =[]
information1 = []
contribution1 = []
experience2 = []
effort2 = []
final_skill2 = []
final_PL2 =[]
final_PD2 =[]
information2 = []
contribution2 = []
##
Ntrajectory=50
for ID in bureau1.list():
agent = bureau1.member[ID]
effort1.append(agent.N-agent.NT)
experience1.append(agent.NT)
final_skill1.append(agent.traininghistory['Skill'][-1])
final_PL1.append(np.mean(agent.get_PL_realization(Ntrajectory)))
final_PD1.append(np.mean(agent.get_PD_realization(Ntrajectory)))
information1.append(agent.testhistory['I'].sum())
contribution1.append(agent.testhistory['Skill'].sum())
for ID in bureau2.list():
agent = bureau2.member[ID]
effort2.append(agent.N-agent.NT)
experience2.append(agent.NT)
final_skill2.append(agent.traininghistory['Skill'][-1])
final_PL2.append(np.mean(agent.get_PL_realization(Ntrajectory)))
final_PD2.append(np.mean(agent.get_PD_realization(Ntrajectory)))
information2.append(agent.testhistory['I'].sum())
contribution2.append(agent.testhistory['Skill'].sum())
## PL-PD plot
def plotplpd(xx,yy,zz,which,ztitle):
bins=100;
ax=subplot(2,2,which,aspect=1.);
hist2d(xx,yy,bins,weights=zz,norm=matplotlib.colors.LogNorm());
cbar=colorbar();
cbar.solids.set_edgecolor("face");
ax.set_xlabel("P$_L$");
ax.set_ylabel("P$_D$");
ax.set_title(ztitle);
ax.set_xlim(0,1);
ax.set_ylim(0,1);
xx=np.arange(-0.1,2,0.1);
ax.axhline(0.5,color="k",linestyle='dashed');
ax.axvline(0.5,color="k",linestyle='dashed');
ax.plot(xx,1-xx,color="k");
###########################
##Users
###########################
plotplpd(final_PL1,final_PD1,None,1,"Stage1 Users")
plotplpd(final_PL2,final_PD2,None,2,"Stage2 Users")
savefig("users_plpd.png")
clf();
###########################
##Effort
###########################
plotplpd(final_PL1,final_PD1,effort1,1,"Stage 1 Effort")
plotplpd(final_PL2,final_PD2,effort2,2,"Stage 2 Effort")
savefig("effort_plpd.png")
clf();
###########################
##Experience
###########################
plotplpd(final_PL1,final_PD1,experience1,1,"Stage 1 Experience")
plotplpd(final_PL2,final_PD2,experience2,2,"Stage 2 Experience");
savefig("experience_plpd.png")
clf();
###########################
##Contribution
###########################
plotplpd(final_PL1,final_PD1,contribution1,1,"Stage 1 Contribution")
plotplpd(final_PL2,final_PD2,contribution2,2,"Stage 2 Contribution")
savefig("contribution_plpd.png")
clf();
###########################
##Average Information
###########################
plotplpd(final_PL1,final_PD1,information1,1,"Stage 1 Information")
plotplpd(final_PL2,final_PD2,information2,2,"Stage 2 Information")
savefig("information_plpd.png")
clf();
###########################
##Skill PL PD plot
###########################
bins=101
skill=np.zeros(bins*bins);
skill=np.reshape(skill,(bins,bins));
for ii in range(bins):
M_ll=0.01*ii;
for jj in range(bins):
M_nn=0.01*jj;
skill[ii][jj]=swap.expectedInformationGain(0.5, M_ll, M_nn);
ax=subplot(1,1,1);
im=ax.imshow(skill,origin='lower',extent=(0,1,0,1));
cbar=colorbar(im);
cbar.solids.set_edgecolor("face");
ax.set_xlim(0,1);
ax.set_ylim(0,1);
ax.set_xlabel("P$_L$");
ax.set_ylabel("P$_D$");
ax.set_title("Skill");
xx=np.arange(-0.1,2,0.1);
ax.axhline(0.5,color="k",linestyle='dashed');
ax.axvline(0.5,color="k",linestyle='dashed');
ax.plot(xx,1-xx,color="k");
savefig("skill_plpd.png")
clf();
###########################
## Cumulative effort and users vs. skill
###########################
bins=100
ax=subplot(2,2,1);
hist(final_skill1,bins,cumulative=True,normed=1,color=(0.8,0.2,0.2),histtype='stepfilled',label="Users",range=(0,1));
hist(final_skill1,bins,weights=effort1, cumulative=True,color=(1.0,0.7,0.5),normed=1,histtype='stepfilled',label="Effort",range=(0,1));
ax.set_xlabel("Skill");
ax.set_ylim(0,1.)
ax.set_ylabel("Cumulative Fraction");
ax.set_title("Stage 1")
legend(loc=4);
ax=subplot(2,2,2);
hist(final_skill2,bins,cumulative=True,normed=1,color=(0.8,0.2,0.2),histtype='stepfilled',label="Users",range=(0,1));
hist(final_skill2,bins,weights=effort2, cumulative=True,color=(1.0,0.7,0.5),normed=1,histtype='stepfilled',label="Effort",range=(0,1));
ax.set_xlabel("Skill");
ax.set_ylim(0,1.)
ax.set_ylabel("Cumulative Fraction");
ax.set_title("Stage 2")
legend(loc=4);
savefig("skill_effort_users_cum.png")
clf();
###########################
## Training histories of first 20 agents with final skill > 0.5 and <0.5 for Stage 1 and 2
###########################
final_skill1=np.array(final_skill1)
idx=(final_skill1>0.5)
idxl=(final_skill1<0.5)
ax=subplot(2,2,1);
ax.set_xscale('log');
ax.set_xlabel("Experience")
ax.set_ylabel("Skill")
ax.set_title("Stage1")
ii=0;
for idxx,ID in zip(idx,bureau1.list()):
if(ii>20):
break;
if(not idxx):
continue;
agent = bureau1.member[ID]
I = agent.traininghistory['Skill']
N = np.linspace(1, len(I), len(I), endpoint=True)
# Information contributions:
ax.plot(N, I, color="green", alpha=0.2, linewidth=2.0, linestyle="-")
ax.scatter(N[-1], I[-1], color="green", alpha=0.5)
ii=ii+1
ii=0;
for idxx,ID in zip(idxl,bureau1.list()):
if(ii>20):
break;
if(not idxx):
continue;
agent = bureau1.member[ID]
I = agent.traininghistory['Skill']
N = np.linspace(1, len(I), len(I), endpoint=True)
# Information contributions:
ax.plot(N, I, color="red", alpha=0.2, linewidth=2.0, linestyle="-")
ax.scatter(N[-1], I[-1], color="red", alpha=0.5)
ii=ii+1
final_skill2=np.array(final_skill2)
idx=(final_skill2>0.5)
idxl=(final_skill2<0.5)
ax=subplot(2,2,2);
ax.set_xscale('log');
ax.set_xlabel("Experience")
ax.set_ylabel("Skill")
ax.set_title("Stage2")
for idxx,ID in zip(idx,bureau2.list()):
if(not idxx):
continue;
agent = bureau2.member[ID]
I = agent.traininghistory['Skill']
N = np.linspace(1, len(I), len(I), endpoint=True)
# Information contributions:
ax.plot(N, I, color="green", alpha=0.2, linewidth=2.0, linestyle="-")
ax.scatter(N[-1], I[-1], color="green", alpha=0.5)
ii=0;
for idxx,ID in zip(idxl,bureau2.list()):
if(ii>20):
break;
if(not idxx):
continue;
agent = bureau2.member[ID]
I = agent.traininghistory['Skill']
N = np.linspace(1, len(I), len(I), endpoint=True)
# Information contributions:
ax.plot(N, I, color="red", alpha=0.2, linewidth=2.0, linestyle="-")
ax.scatter(N[-1], I[-1], color="red", alpha=0.5)
ii=ii+1
tight_layout();
savefig("skill_experience.png")
clf();
###########################
## function to plot 2d histograms
###########################
def plothist2d(xx,yy,zz,which,xlab,ylab,ztitle):
bins=100;
ax=subplot(2,2,which);
xx=np.array(xx)
yy=np.array(yy)
zz=np.array(zz)
idx=np.where(xx>0)
hist2d(np.log10(xx[idx]),yy[idx],bins,weights=zz[idx],norm=matplotlib.colors.LogNorm());
cbar=colorbar();
cbar.solids.set_edgecolor("face");
ax.set_xlabel(xlab);
ax.set_ylabel(ylab);
ax.set_title(ztitle);
###########################
## Contribution as a function of experience vs. skill
###########################
plothist2d(experience1,final_skill1,contribution1,1,"Log(Experience)","Skill","Stage 1 Contribution")
plothist2d(experience2,final_skill2,contribution2,2,"Log(Experience)","Skill","Stage 2 Contribution")
savefig("experience_skill_contribution.png")
clf();
###########################
## Contribution as a function of effort vs. skill
###########################
plothist2d(effort1,final_skill1,contribution1,1,"Log(Effort)","Skill","Stage 1 Contribution")
plothist2d(effort2,final_skill2,contribution2,2,"Log(Effort)","Skill","Stage 2 Contribution")
savefig("effort_skill_contribution.png")
clf();
if __name__ == '__main__':
make_info_plots(sys.argv[1:])
|
mit
|
jorge2703/scikit-learn
|
sklearn/feature_extraction/tests/test_text.py
|
110
|
34127
|
from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
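# --- Editor's illustrative sketch (not part of the original test module) ---
# The fixtures above form a tiny corpus; fitting a default CountVectorizer on it
# yields an 11-term vocabulary, including the very frequent 'the' and
# 'copyright' that several tests below expect to be pruned by max_df.
# This helper is only an example and is not used by the tests.
def _example_food_docs_vocabulary():
    return sorted(CountVectorizer().fit(ALL_FOOD_DOCS).vocabulary_)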
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
    # decode_error defaults to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
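# --- Editor's illustrative sketch (not part of the original test module) ---
# The smoothed idf exercised above is documented as ln((1 + n_samples) / (1 + df(t))) + 1,
# so a term present in every document still gets a positive weight of 1.0. A quick
# hand computation for the 3-document matrix in test_tf_idf_smoothing:
def _example_smoothed_idf():
    df = np.array([3., 2., 1.])              # document frequencies of the 3 features
    return np.log((1. + 3.) / (1. + df)) + 1.  # approximately [1.0, 1.288, 1.693]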
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features with
    # only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
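# A minimal sketch (hypothetical helper, not an original test): how the float df
# thresholds in the comments above translate to absolute document counts -- a float
# ratio is multiplied by the number of documents, an integer is used as-is.
def _df_threshold_to_doc_count(threshold, n_docs):
    return threshold * n_docs if isinstance(threshold, float) else threshold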
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset every candidate converges to a 100% accurate model, so
    # the reported best estimator is simply the grid search's tie-breaking choice
    # (here a unigram CountVectorizer)
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset every candidate converges to a 100% accurate model, so
    # the reported best estimator is simply the grid search's tie-breaking choice
    # (here a unigram TfidfVectorizer with l2 norm)
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
|
bsd-3-clause
|
chrsrds/scikit-learn
|
examples/linear_model/plot_polynomial_interpolation.py
|
168
|
2088
|
#!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is
n_samples x (n_degree + 1) and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
colors = ['teal', 'yellowgreen', 'gold']
lw = 2
plt.plot(x_plot, f(x_plot), color='cornflowerblue', linewidth=lw,
label="ground truth")
plt.scatter(x, y, color='navy', s=30, marker='o', label="training points")
for count, degree in enumerate([3, 4, 5]):
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, color=colors[count], linewidth=lw,
label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
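# A minimal sketch (hypothetical helper, not part of the original example): shows the
# Vandermonde-style matrix that PolynomialFeatures builds from a handful of 1d points,
# i.e. each row is [1, x_i, x_i**2, ..., x_i**degree].
def _show_vandermonde(degree=3):
    x_small = np.array([[1.0], [2.0], [3.0]])
    # For degree=3 the rows are [1, 1, 1, 1], [1, 2, 4, 8] and [1, 3, 9, 27].
    return PolynomialFeatures(degree).fit_transform(x_small)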
|
bsd-3-clause
|
makism/dyfunconn
|
setup_tag.py
|
1
|
2149
|
#!/usr/bin/env python
# based on:
# https://github.com/marcindulak/python-mycli/blob/master/setup.py#L34
import os
from setuptools import setup
def fetch_version_from_file():
""" Fetch the version string from a file. If the file doesn't exist the setup will exit. """
with open("TAG_VERSION", "r") as fp:
version = fp.read()
return version
return None
name = "dyconnmap"
rootdir = os.path.abspath(os.path.dirname(__file__))
packages = []
for dirname, dirnames, filenames in os.walk(name):
if "__init__.py" in filenames:
packages.append(dirname.replace("/", "."))
data_files = []
for extra_dirs in ("docs", "examples", "tests"):
for dirname, dirnames, filenames in os.walk(extra_dirs):
fileslist = []
for filename in filenames:
fullname = os.path.join(dirname, filename)
fileslist.append(fullname)
data_files.append(("share/" + name + "/" + dirname, fileslist))
setup(
name="dyconnmap",
version=fetch_version_from_file(),
description="A dynamic connectome mapping module in Python",
author="Avraam Marimpis, Stavros Dimitriadis",
author_email="[email protected], [email protected]",
license="BSD",
keywords="eeg fMRI meg connectivity graphs neuroimage brain",
url="https://github.com/makism/dyconnmap",
python_requires="~=3.6",
packages=packages,
install_requires=[
"numpy",
"scipy",
"networkx",
"matplotlib",
"statsmodels",
"scikit-learn",
"bctpy",
],
package_dir={"dyconnmap": "dyconnmap"},
data_files=data_files,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3.6",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
],
)
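# Illustrative usage only (not part of the original script): assuming a TAG_VERSION
# file with e.g. "1.0.0" sits next to setup.py, a source distribution can be built
# with the standard commands:
#
#     echo "1.0.0" > TAG_VERSION
#     python setup.py sdist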
|
bsd-3-clause
|
jereze/scikit-learn
|
examples/ensemble/plot_gradient_boosting_quantile.py
|
392
|
2114
|
"""
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# Generate noisy training observations of f
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function and the predictions
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% prediction interval
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
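# A quick, hedged sanity check (not part of the original example, helper name is
# hypothetical): refit the two quantile models and measure the empirical coverage of
# the interval on the training points; roughly 90% of the observations should fall
# inside [lower, upper].
def _empirical_coverage(X=X, y=y, alpha=alpha):
    params = dict(n_estimators=250, max_depth=3, learning_rate=.1,
                  min_samples_leaf=9, min_samples_split=9)
    upper = GradientBoostingRegressor(loss='quantile', alpha=alpha,
                                      **params).fit(X, y).predict(X)
    lower = GradientBoostingRegressor(loss='quantile', alpha=1.0 - alpha,
                                      **params).fit(X, y).predict(X)
    return np.mean((y >= lower) & (y <= upper))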
|
bsd-3-clause
|
mikegraham/dask
|
dask/bag/core.py
|
1
|
51601
|
from __future__ import absolute_import, division, print_function
from collections import Iterable, Iterator, defaultdict
from functools import wraps, partial
import itertools
import math
from operator import getitem
import os
import types
import uuid
from warnings import warn
from distutils.version import LooseVersion
from ..utils import ignoring
from toolz import (merge, take, reduce, valmap, map, partition_all, filter,
remove, compose, curry, first, second, accumulate)
from toolz.compatibility import iteritems, zip
import toolz
_implement_accumulate = LooseVersion(toolz.__version__) > '0.7.4'
try:
import cytoolz
from cytoolz import (frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)
if LooseVersion(cytoolz.__version__) > '0.7.3':
from cytoolz import accumulate
_implement_accumulate = True
except:
from toolz import (frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)
from ..base import Base, normalize_token, tokenize
from ..compatibility import apply, unicode, urlopen
from ..context import _globals
from ..core import list2, quote, istask, get_dependencies, reverse_dict
from ..multiprocessing import get as mpget
from ..optimize import fuse, cull, inline
from ..utils import (infer_compression, open, system_encoding,
takes_multiple_arguments, funcname, digit, insert)
no_default = '__no__default__'
def lazify_task(task, start=True):
"""
Given a task, remove unnecessary calls to ``list``
Examples
--------
>>> task = (sum, (list, (map, inc, [1, 2, 3]))) # doctest: +SKIP
>>> lazify_task(task) # doctest: +SKIP
(sum, (map, inc, [1, 2, 3]))
"""
if not istask(task):
return task
head, tail = task[0], task[1:]
if not start and head in (list, reify):
task = task[1]
return lazify_task(*tail, start=False)
else:
return (head,) + tuple([lazify_task(arg, False) for arg in tail])
def lazify(dsk):
"""
Remove unnecessary calls to ``list`` in tasks
See Also
--------
``dask.bag.core.lazify_task``
"""
return valmap(lazify_task, dsk)
def inline_singleton_lists(dsk, dependencies=None):
""" Inline lists that are only used once
>>> d = {'b': (list, 'a'),
... 'c': (f, 'b', 1)} # doctest: +SKIP
>>> inline_singleton_lists(d) # doctest: +SKIP
{'c': (f, (list, 'a'), 1)}
Pairs nicely with lazify afterwards
"""
if dependencies is None:
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
dependents = reverse_dict(dependencies)
keys = [k for k, v in dsk.items()
if istask(v) and v and v[0] is list and len(dependents[k]) == 1]
return inline(dsk, keys, inline_constants=False)
def optimize(dsk, keys, **kwargs):
""" Optimize a dask from a dask.bag """
dsk2, dependencies = cull(dsk, keys)
dsk3, dependencies = fuse(dsk2, keys, dependencies)
dsk4 = inline_singleton_lists(dsk3, dependencies)
dsk5 = lazify(dsk4)
return dsk5
def to_textfiles(b, path, name_function=str, compression='infer',
encoding=system_encoding, compute=True):
""" Write bag to disk, one filename per partition, one line per element
**Paths**: This will create one file for each partition in your bag. You
can specify the filenames in a variety of ways.
Use a globstring
>>> b.to_textfiles('/path/to/data/*.json.gz') # doctest: +SKIP
    The * will be replaced by the increasing sequence 0, 1, 2, ...
::
/path/to/data/0.json.gz
/path/to/data/1.json.gz
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> b.to_textfiles('/path/to/data/*.json.gz', name_function=name) # doctest: +SKIP
::
/path/to/data/2015-01-01.json.gz
/path/to/data/2015-01-02.json.gz
...
You can also provide an explicit list of paths.
>>> paths = ['/path/to/data/alice.json.gz', '/path/to/data/bob.json.gz', ...] # doctest: +SKIP
>>> b.to_textfiles(paths) # doctest: +SKIP
**Compression**: Filenames with extensions corresponding to known
compression algorithms (gz, bz2) will be compressed accordingly.
**Bag Contents**: The bag calling ``to_textfiles`` _must_ be a bag of
text strings. For example, a bag of dictionaries could be written to
    JSON text files by mapping ``json.dumps`` onto the bag first, and
then calling ``to_textfiles``:
>>> b_dict.map(json.dumps).to_textfiles("/path/to/data/*.json") # doctest: +SKIP
"""
if isinstance(path, (str, unicode)):
if '*' in path:
paths = [path.replace('*', name_function(i))
for i in range(b.npartitions)]
else:
paths = [os.path.join(path, '%s.part' % name_function(i))
for i in range(b.npartitions)]
elif isinstance(path, (tuple, list, set)):
assert len(path) == b.npartitions
paths = path
else:
        raise ValueError("""Path should be either
1. A list of paths -- ['foo.json', 'bar.json', ...]
2. A directory -- 'foo/'
3. A path with a * in it -- 'foo.*.json'""")
def get_compression(path, compression=compression):
if compression == 'infer':
compression = infer_compression(path)
return compression
name = 'to-textfiles-' + uuid.uuid4().hex
dsk = dict(((name, i), (write, (b.name, i), path, get_compression(path),
encoding))
for i, path in enumerate(paths))
result = Bag(merge(b.dask, dsk), name, b.npartitions)
if compute:
result.compute()
else:
return result
def finalize(results):
if isinstance(results, Iterator):
results = list(results)
if isinstance(results[0], Iterable) and not isinstance(results[0], str):
results = toolz.concat(results)
if isinstance(results, Iterator):
results = list(results)
return results
def finalize_item(results):
return results[0]
def unpack_kwargs(kwargs):
""" Extracts dask values from kwargs
Currently only dask.bag.Item and python literal values are supported.
Returns a merged dask graph and a list of [key, val] pairs suitable for
eventually constructing a dict.
"""
dsk = {}
kw_pairs = []
for key, val in iteritems(kwargs):
if isinstance(val, Item):
dsk.update(val.dask)
val = val.key
# TODO elif isinstance(val, Value):
elif isinstance(val, Base):
raise NotImplementedError(
'%s not supported as kwarg value to Bag.map_partitions'
% type(val).__name__)
kw_pairs.append([key, val])
return dsk, kw_pairs
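# A minimal sketch (hypothetical helper, unused elsewhere): with plain Python keyword
# values ``unpack_kwargs`` returns an empty graph and the [key, value] pairs; the dask
# graph of an ``Item`` would be merged into the first return value instead.
def _unpack_kwargs_example():
    dsk, kw_pairs = unpack_kwargs({'den': 10, 'offset': 1})
    assert dsk == {}
    assert sorted(kw_pairs) == [['den', 10], ['offset', 1]]
    return dsk, kw_pairs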
class Item(Base):
_optimize = staticmethod(optimize)
_default_get = staticmethod(mpget)
_finalize = staticmethod(finalize_item)
@staticmethod
def from_imperative(value):
warn("Deprecation warning: moved to from_delayed")
return from_delayed(value)
@staticmethod
def from_delayed(value):
""" Create bag item from a dask.delayed value
Parameters
----------
value: a Value
A single dask.delayed.Value object, such as come from dask.do
Returns
-------
Item
Examples
--------
>>> b = db.Item.from_delayed(x) # doctest: +SKIP
"""
from dask.delayed import Value
assert isinstance(value, Value)
return Item(value.dask, value.key)
def __init__(self, dsk, key):
self.dask = dsk
self.key = key
self.name = key
def _keys(self):
return [self.key]
def apply(self, func):
name = 'apply-{0}-{1}'.format(funcname(func), tokenize(self, func))
dsk = {name: (func, self.key)}
return Item(merge(self.dask, dsk), name)
__int__ = __float__ = __complex__ = __bool__ = Base.compute
def to_imperative(self):
warn("Deprecation warning: moved to to_delayed")
return self.to_delayed()
def to_delayed(self):
""" Convert bag item to dask Value
Returns a single value.
"""
from dask.delayed import Value
return Value(self.key, [self.dask])
class Bag(Base):
""" Parallel collection of Python objects
Examples
--------
Create Bag from sequence
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(lambda x: x % 2 == 0).map(lambda x: x * 10)) # doctest: +SKIP
[0, 20, 40]
Create Bag from filename or globstring of filenames
>>> b = db.read_text('/path/to/mydata.*.json.gz').map(json.loads) # doctest: +SKIP
Create manually (expert use)
>>> dsk = {('x', 0): (range, 5),
... ('x', 1): (range, 5),
... ('x', 2): (range, 5)}
>>> b = Bag(dsk, 'x', npartitions=3)
>>> sorted(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]
>>> int(b.fold(lambda x, y: x + y)) # doctest: +SKIP
30
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(mpget)
_finalize = staticmethod(finalize)
def __init__(self, dsk, name, npartitions):
self.dask = dsk
self.name = name
self.npartitions = npartitions
self.str = StringAccessor(self)
def __str__(self):
name = self.name if len(self.name) < 10 else self.name[:7] + '...'
return 'dask.bag<%s, npartitions=%d>' % (name, self.npartitions)
__repr__ = __str__
def map(self, func, **kwargs):
""" Map a function across all elements in collection
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 10, 20, 30, 40]
Keyword arguments are passed through to ``func``. These can be either
``dask.bag.Item``, or normal python objects.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(1, 101), npartitions=10)
>>> def div(num, den=1):
... return num / den
Using a python object:
>>> hi = b.max().compute()
>>> hi
100
>>> b.map(div, den=hi).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Using an ``Item``:
>>> b.map(div, den=b.max()).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Note that while both versions give the same output, the second forms a
single graph, and then computes everything at once, and in some cases
may be more efficient.
"""
name = 'map-{0}-{1}'.format(funcname(func),
tokenize(self, func, kwargs))
if takes_multiple_arguments(func):
func = partial(apply, func)
dsk = self.dask.copy()
if kwargs:
kw_dsk, kw_pairs = unpack_kwargs(kwargs)
dsk.update(kw_dsk)
func = (apply, partial, [func], (dict, kw_pairs))
dsk.update(((name, i), (reify, (map, func, (self.name, i))))
for i in range(self.npartitions))
return type(self)(dsk, name, self.npartitions)
@property
def _args(self):
return (self.dask, self.name, self.npartitions)
def filter(self, predicate):
""" Filter elements in collection by a predicate function
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(iseven)) # doctest: +SKIP
[0, 2, 4]
"""
name = 'filter-{0}-{1}'.format(funcname(predicate),
tokenize(self, predicate))
dsk = dict(((name, i), (reify, (filter, predicate, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def remove(self, predicate):
""" Remove elements in collection that match predicate
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.remove(iseven)) # doctest: +SKIP
[1, 3]
"""
name = 'remove-{0}-{1}'.format(funcname(predicate),
tokenize(self, predicate))
dsk = dict(((name, i), (reify, (remove, predicate, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def map_partitions(self, func, **kwargs):
""" Apply function to every partition within collection
Note that this requires you to understand how dask.bag partitions your
data and so is somewhat internal.
>>> b.map_partitions(myfunc) # doctest: +SKIP
Keyword arguments are passed through to ``func``. These can be either
``dask.bag.Item``, or normal python objects.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(1, 101), npartitions=10)
>>> def div(nums, den=1):
... return [num / den for num in nums]
Using a python object:
>>> hi = b.max().compute()
>>> hi
100
>>> b.map_partitions(div, den=hi).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Using an ``Item``:
>>> b.map_partitions(div, den=b.max()).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Note that while both versions give the same output, the second forms a
single graph, and then computes everything at once, and in some cases
may be more efficient.
"""
name = 'map-partitions-{0}-{1}'.format(funcname(func),
tokenize(self, func, kwargs))
dsk = self.dask.copy()
if kwargs:
kw_dsk, kw_pairs = unpack_kwargs(kwargs)
dsk.update(kw_dsk)
dsk.update(((name, i),
(apply, func, [(self.name, i)], (dict, kw_pairs))
if kwargs else (func, (self.name, i)))
for i in range(self.npartitions))
return type(self)(dsk, name, self.npartitions)
def pluck(self, key, default=no_default):
""" Select item from all tuples/dicts in collection
>>> b = from_sequence([{'name': 'Alice', 'credits': [1, 2, 3]},
... {'name': 'Bob', 'credits': [10, 20]}])
>>> list(b.pluck('name')) # doctest: +SKIP
['Alice', 'Bob']
>>> list(b.pluck('credits').pluck(0)) # doctest: +SKIP
[1, 10]
"""
name = 'pluck-' + tokenize(self, key, default)
key = quote(key)
if default == no_default:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i))))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i), default)))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def unzip(self, n):
"""Transform a bag of tuples to ``n`` bags of their elements.
Examples
--------
>>> b = from_sequence([(i, i + 1, i + 2) for i in range(10)])
>>> first, second, third = b.unzip(3)
>>> isinstance(first, Bag)
True
>>> first.compute()
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Note that this is equivalent to:
>>> first, second, third = (b.pluck(i) for i in range(3))
"""
return tuple(self.pluck(i) for i in range(n))
@wraps(to_textfiles)
def to_textfiles(self, path, name_function=str, compression='infer',
encoding=system_encoding, compute=True):
return to_textfiles(self, path, name_function, compression, encoding, compute)
def fold(self, binop, combine=None, initial=no_default, split_every=None):
""" Parallelizable reduction
Fold is like the builtin function ``reduce`` except that it works in
parallel. Fold takes two binary operator functions, one to reduce each
partition of our dataset and another to combine results between
partitions
1. ``binop``: Binary operator to reduce within each partition
2. ``combine``: Binary operator to combine results from binop
Sequentially this would look like the following:
>>> intermediates = [reduce(binop, part) for part in partitions] # doctest: +SKIP
>>> final = reduce(combine, intermediates) # doctest: +SKIP
If only one function is given then it is used for both functions
``binop`` and ``combine`` as in the following example to compute the
sum:
>>> def add(x, y):
... return x + y
>>> b = from_sequence(range(5))
>>> b.fold(add).compute() # doctest: +SKIP
10
In full form we provide both binary operators as well as their default
arguments
>>> b.fold(binop=add, combine=add, initial=0).compute() # doctest: +SKIP
10
More complex binary operators are also doable
>>> def add_to_set(acc, x):
... ''' Add new element x to set acc '''
... return acc | set([x])
>>> b.fold(add_to_set, set.union, initial=set()).compute() # doctest: +SKIP
{1, 2, 3, 4, 5}
See Also
--------
Bag.foldby
"""
combine = combine or binop
initial = quote(initial)
if initial is not no_default:
return self.reduction(curry(_reduce, binop, initial=initial),
curry(_reduce, combine),
split_every=split_every)
else:
from toolz.curried import reduce
return self.reduction(reduce(binop), reduce(combine),
split_every=split_every)
def frequencies(self, split_every=None):
""" Count number of occurrences of each distinct element
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> dict(b.frequencies()) # doctest: +SKIP
        {'Alice': 2, 'Bob': 1}
"""
return self.reduction(frequencies, merge_frequencies,
out_type=Bag, split_every=split_every,
name='frequencies').map_partitions(dictitems)
def topk(self, k, key=None, split_every=None):
""" K largest elements in collection
Optionally ordered by some key function
>>> b = from_sequence([10, 3, 5, 7, 11, 4])
>>> list(b.topk(2)) # doctest: +SKIP
[11, 10]
>>> list(b.topk(2, lambda x: -x)) # doctest: +SKIP
[3, 4]
"""
if key:
if callable(key) and takes_multiple_arguments(key):
key = partial(apply, key)
func = partial(topk, k, key=key)
else:
func = partial(topk, k)
return self.reduction(func, compose(func, toolz.concat), out_type=Bag,
split_every=split_every, name='topk')
def distinct(self):
""" Distinct elements of collection
Unordered without repeats.
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> sorted(b.distinct())
['Alice', 'Bob']
"""
return self.reduction(set, curry(apply, set.union), out_type=Bag,
name='distinct')
def reduction(self, perpartition, aggregate, split_every=None,
out_type=Item, name=None):
""" Reduce collection with reduction operators
Parameters
----------
perpartition: function
reduction to apply to each partition
aggregate: function
reduction to apply to the results of all partitions
split_every: int (optional)
Group partitions into groups of this size while performing reduction
Defaults to 8
out_type: {Bag, Item}
The out type of the result, Item if a single element, Bag if a list
of elements. Defaults to Item.
Examples
--------
>>> b = from_sequence(range(10))
>>> b.reduction(sum, sum).compute()
45
"""
if split_every is None:
split_every = 8
if split_every is False:
split_every = self.npartitions
token = tokenize(self, perpartition, aggregate, split_every)
a = '%s-part-%s' % (name or funcname(perpartition), token)
dsk = dict(((a, i), (perpartition, (self.name, i)))
for i in range(self.npartitions))
k = self.npartitions
b = a
fmt = '%s-aggregate-%s' % (name or funcname(aggregate), token)
depth = 0
while k > 1:
c = fmt + str(depth)
dsk2 = dict(((c, i), (aggregate, [(b, j) for j in inds]))
for i, inds in enumerate(partition_all(split_every,
range(k))))
dsk.update(dsk2)
k = len(dsk2)
b = c
depth += 1
if out_type is Item:
dsk[b] = dsk.pop((b, 0))
return Item(merge(self.dask, dsk), b)
else:
return Bag(merge(self.dask, dsk), b, 1)
@wraps(sum)
def sum(self, split_every=None):
return self.reduction(sum, sum, split_every=split_every)
@wraps(max)
def max(self, split_every=None):
return self.reduction(max, max, split_every=split_every)
@wraps(min)
def min(self, split_every=None):
return self.reduction(min, min, split_every=split_every)
@wraps(any)
def any(self, split_every=None):
return self.reduction(any, any, split_every=split_every)
@wraps(all)
def all(self, split_every=None):
return self.reduction(all, all, split_every=split_every)
def count(self, split_every=None):
""" Count the number of elements """
return self.reduction(count, sum, split_every=split_every)
def mean(self):
""" Arithmetic mean """
def mean_chunk(seq):
total, n = 0.0, 0
for x in seq:
total += x
n += 1
return total, n
def mean_aggregate(x):
totals, counts = list(zip(*x))
return 1.0 * sum(totals) / sum(counts)
return self.reduction(mean_chunk, mean_aggregate, split_every=False)
def var(self, ddof=0):
""" Variance """
def var_chunk(seq):
squares, total, n = 0.0, 0.0, 0
for x in seq:
squares += x**2
total += x
n += 1
return squares, total, n
def var_aggregate(x):
squares, totals, counts = list(zip(*x))
x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)
result = (x2 / n) - (x / n)**2
return result * n / (n - ddof)
return self.reduction(var_chunk, var_aggregate, split_every=False)
def std(self, ddof=0):
""" Standard deviation """
return self.var(ddof=ddof).apply(math.sqrt)
def join(self, other, on_self, on_other=None):
""" Join collection with another collection
Other collection must be an Iterable, and not a Bag.
>>> people = from_sequence(['Alice', 'Bob', 'Charlie'])
>>> fruit = ['Apple', 'Apricot', 'Banana']
>>> list(people.join(fruit, lambda x: x[0])) # doctest: +SKIP
[('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]
"""
assert isinstance(other, Iterable)
assert not isinstance(other, Bag)
if on_other is None:
on_other = on_self
name = 'join-' + tokenize(self, other, on_self, on_other)
dsk = dict(((name, i), (list, (join, on_other, other,
on_self, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def product(self, other):
""" Cartesian product between two bags """
assert isinstance(other, Bag)
name = 'product-' + tokenize(self, other)
n, m = self.npartitions, other.npartitions
dsk = dict(((name, i*m + j),
(list, (itertools.product, (self.name, i),
(other.name, j))))
for i in range(n) for j in range(m))
return type(self)(merge(self.dask, other.dask, dsk), name, n*m)
def foldby(self, key, binop, initial=no_default, combine=None,
combine_initial=no_default):
""" Combined reduction and groupby
Foldby provides a combined groupby and reduce for efficient parallel
split-apply-combine tasks.
The computation
>>> b.foldby(key, binop, init) # doctest: +SKIP
is equivalent to the following:
>>> def reduction(group): # doctest: +SKIP
... return reduce(binop, group, init) # doctest: +SKIP
        >>> b.groupby(key).map(lambda kv: (kv[0], reduction(kv[1])))  # doctest: +SKIP
But uses minimal communication and so is *much* faster.
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> add = lambda x, y: x + y
>>> dict(b.foldby(iseven, add)) # doctest: +SKIP
{True: 20, False: 25}
**Key Function**
The key function determines how to group the elements in your bag.
In the common case where your bag holds dictionaries then the key
function often gets out one of those elements.
>>> def key(x):
... return x['name']
This case is so common that it is special cased, and if you provide a
key that is not a callable function then dask.bag will turn it into one
automatically. The following are equivalent:
>>> b.foldby(lambda x: x['name'], ...) # doctest: +SKIP
>>> b.foldby('name', ...) # doctest: +SKIP
**Binops**
It can be tricky to construct the right binary operators to perform
analytic queries. The ``foldby`` method accepts two binary operators,
        ``binop`` and ``combine``.  A binary operator's two inputs and its
        output must all have the same type.
Binop takes a running total and a new element and produces a new total:
>>> def binop(total, x):
... return total + x['amount']
Combine takes two totals and combines them:
>>> def combine(total1, total2):
... return total1 + total2
Each of these binary operators may have a default first value for
total, before any other value is seen. For addition binary operators
like above this is often ``0`` or the identity element for your
operation.
>>> b.foldby('name', binop, 0, combine, 0) # doctest: +SKIP
See Also
--------
toolz.reduceby
pyspark.combineByKey
"""
token = tokenize(self, key, binop, initial, combine, combine_initial)
a = 'foldby-a-' + token
b = 'foldby-b-' + token
if combine is None:
combine = binop
if initial is not no_default:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i)))
for i in range(self.npartitions))
def combine2(acc, x):
return combine(acc, x[1])
if combine_initial is not no_default:
dsk2 = {(b, 0): (dictitems, (
reduceby, 0, combine2, (
toolz.concat, (
map, dictitems, list(dsk.keys()))),
combine_initial))}
else:
dsk2 = {(b, 0): (dictitems, (
merge_with, (partial, reduce, combine),
list(dsk.keys())))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def take(self, k, compute=True):
""" Take the first k elements
Evaluates by default, use ``compute=False`` to avoid computation.
Only takes from the first partition
>>> b = from_sequence(range(10))
>>> b.take(3) # doctest: +SKIP
(0, 1, 2)
"""
name = 'take-' + tokenize(self, k)
dsk = {(name, 0): (list, (take, k, (self.name, 0)))}
b = Bag(merge(self.dask, dsk), name, 1)
if compute:
return tuple(b.compute())
else:
return b
def _keys(self):
return [(self.name, i) for i in range(self.npartitions)]
def concat(self):
""" Concatenate nested lists into one long list
>>> b = from_sequence([[1], [2, 3]])
>>> list(b)
[[1], [2, 3]]
>>> list(b.concat())
[1, 2, 3]
"""
name = 'concat-' + tokenize(self)
dsk = dict(((name, i), (list, (toolz.concat, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def __iter__(self):
return iter(self.compute())
def groupby(self, grouper, method=None, npartitions=None, blocksize=2**20,
max_branch=None):
""" Group collection by key function
This requires a full dataset read, serialization and shuffle.
This is expensive. If possible you should use ``foldby``.
Parameters
----------
grouper: function
Function on which to group elements
method: str
Either 'disk' for an on-disk shuffle or 'tasks' to use the task
scheduling framework. Use 'disk' if you are on a single machine
and 'tasks' if you are on a distributed cluster.
npartitions: int
If using the disk-based shuffle, the number of output partitions
blocksize: int
If using the disk-based shuffle, the size of shuffle blocks
max_branch: int
If using the task-based shuffle, the amount of splitting each
partition undergoes. Increase this for fewer copies but more
scheduler overhead.
Examples
--------
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> dict(b.groupby(iseven)) # doctest: +SKIP
{True: [0, 2, 4, 6, 8], False: [1, 3, 5, 7, 9]}
See Also
--------
Bag.foldby
"""
if method is None:
get = _globals.get('get')
if (isinstance(get, types.MethodType) and
'distributed' in get.__func__.__module__):
method = 'tasks'
else:
method = 'disk'
if method == 'disk':
return groupby_disk(self, grouper, npartitions=npartitions,
blocksize=blocksize)
elif method == 'tasks':
return groupby_tasks(self, grouper, max_branch=max_branch)
else:
raise NotImplementedError(
"Shuffle method must be 'disk' or 'tasks'")
def to_dataframe(self, columns=None):
""" Convert Bag to dask.dataframe
Bag should contain tuple or dict records.
Provide ``columns=`` keyword arg to specify column names.
Index will not be particularly meaningful. Use ``reindex`` afterwards
if necessary.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence([{'name': 'Alice', 'balance': 100},
... {'name': 'Bob', 'balance': 200},
... {'name': 'Charlie', 'balance': 300}],
... npartitions=2)
>>> df = b.to_dataframe()
>>> df.compute()
balance name
0 100 Alice
1 200 Bob
0 300 Charlie
"""
import pandas as pd
import dask.dataframe as dd
if columns is None:
head = self.take(1)[0]
if isinstance(head, dict):
columns = sorted(head)
elif isinstance(head, (tuple, list)):
columns = list(range(len(head)))
name = 'to_dataframe-' + tokenize(self, columns)
DataFrame = partial(pd.DataFrame, columns=columns)
dsk = dict(((name, i), (DataFrame, (list2, (self.name, i))))
for i in range(self.npartitions))
divisions = [None] * (self.npartitions + 1)
return dd.DataFrame(merge(optimize(self.dask, self._keys()), dsk),
name, columns, divisions)
def to_imperative(self):
warn("Deprecation warning: moved to to_delayed")
return self.to_delayed()
def to_delayed(self):
""" Convert bag to dask Values
Returns list of values, one value per partition.
"""
from dask.delayed import Value
return [Value(k, [self.dask]) for k in self._keys()]
def repartition(self, npartitions):
""" Coalesce bag into fewer partitions
Examples
--------
>>> b.repartition(5) # set to have 5 partitions # doctest: +SKIP
"""
if npartitions > self.npartitions:
raise NotImplementedError(
"Repartition only supports going to fewer partitions\n"
" old: %d new: %d" % (self.npartitions, npartitions))
size = self.npartitions / npartitions
L = [int(i * size) for i in range(npartitions + 1)]
name = 'repartition-%d-%s' % (npartitions, self.name)
dsk = dict(((name, i), (list,
(toolz.concat,
[(self.name, j) for j in range(L[i], L[i + 1])]
)))
for i in range(npartitions))
return Bag(merge(self.dask, dsk), name, npartitions)
def accumulate(self, binop, initial=no_default):
"""Repeatedly apply binary function to a sequence, accumulating results.
Examples
--------
>>> from operator import add
>>> b = from_sequence([1, 2, 3, 4, 5], npartitions=2)
>>> b.accumulate(add).compute() # doctest: +SKIP
[1, 3, 6, 10, 15]
Accumulate also takes an optional argument that will be used as the
first value.
>>> b.accumulate(add, -1) # doctest: +SKIP
[-1, 0, 2, 5, 9, 15]
"""
if not _implement_accumulate:
raise NotImplementedError("accumulate requires `toolz` > 0.7.4"
" or `cytoolz` > 0.7.3.")
token = tokenize(self, binop, initial)
binop_name = funcname(binop)
a = '%s-part-%s' % (binop_name, token)
b = '%s-first-%s' % (binop_name, token)
c = '%s-second-%s' % (binop_name, token)
dsk = {(a, 0): (accumulate_part, binop, (self.name, 0), initial, True),
(b, 0): (first, (a, 0)),
(c, 0): (second, (a, 0))}
for i in range(1, self.npartitions):
dsk[(a, i)] = (accumulate_part, binop, (self.name, i),
(c, i - 1))
dsk[(b, i)] = (first, (a, i))
dsk[(c, i)] = (second, (a, i))
return Bag(merge(self.dask, dsk), b, self.npartitions)
def accumulate_part(binop, seq, initial, is_first=False):
if initial == no_default:
res = list(accumulate(binop, seq))
else:
res = list(accumulate(binop, seq, initial=initial))
if is_first:
return res, res[-1] if res else [], initial
return res[1:], res[-1]
normalize_token.register(Item, lambda a: a.key)
normalize_token.register(Bag, lambda a: a.name)
def partition(grouper, sequence, npartitions, p, nelements=2**20):
""" Partition a bag along a grouper, store partitions on disk """
for block in partition_all(nelements, sequence):
d = groupby(grouper, block)
d2 = defaultdict(list)
for k, v in d.items():
d2[abs(hash(k)) % npartitions].extend(v)
p.append(d2)
return p
def collect(grouper, group, p, barrier_token):
""" Collect partitions from disk and yield k,v group pairs """
d = groupby(grouper, p.get(group, lock=False))
return list(d.items())
def from_filenames(filenames, chunkbytes=None, compression='infer',
encoding=system_encoding, linesep=os.linesep):
""" Deprecated. See read_text """
warn("db.from_filenames is deprecated in favor of db.read_text")
from .text import read_text
return read_text(filenames, blocksize=chunkbytes, compression=compression,
encoding=encoding, linedelimiter=linesep)
def write(data, filename, compression, encoding):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
with ignoring(OSError):
os.makedirs(dirname)
f = open(filename, mode='wb', compression=compression)
# Check presence of endlines
data = iter(data)
try:
firstline = next(data)
except StopIteration:
f.close()
return
if not (firstline.endswith(os.linesep) or firstline.endswith('\n')):
sep = os.linesep if firstline.endswith(os.linesep) else '\n'
firstline = firstline + sep
data = (line + sep for line in data)
f.write(firstline.encode(encoding))
try:
lastline = ''
for line in data:
f.write(lastline.encode(encoding))
lastline = line
f.write(lastline.rstrip(os.linesep).encode(encoding))
finally:
f.close()
def from_sequence(seq, partition_size=None, npartitions=None):
""" Create dask from Python sequence
This sequence should be relatively small in memory. Dask Bag works
best when it handles loading your data itself. Commonly we load a
sequence of filenames into a Bag and then use ``.map`` to open them.
Parameters
----------
seq: Iterable
A sequence of elements to put into the dask
partition_size: int (optional)
The length of each partition
npartitions: int (optional)
The number of desired partitions
It is best to provide either ``partition_size`` or ``npartitions``
(though not both.)
Examples
--------
>>> b = from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)
See Also
--------
read_text: Create bag from textfiles
"""
seq = list(seq)
if npartitions and not partition_size:
partition_size = int(math.ceil(len(seq) / npartitions))
if npartitions is None and partition_size is None:
if len(seq) < 100:
partition_size = 1
else:
partition_size = int(len(seq) / 100)
parts = list(partition_all(partition_size, seq))
name = 'from_sequence-' + tokenize(seq, partition_size)
d = dict(((name, i), part) for i, part in enumerate(parts))
return Bag(d, name, len(d))
def from_castra(x, columns=None, index=False):
"""Load a dask Bag from a Castra.
Parameters
----------
x : filename or Castra
columns: list or string, optional
The columns to load. Default is all columns.
index: bool, optional
If True, the index is included as the first element in each tuple.
Default is False.
"""
from castra import Castra
if not isinstance(x, Castra):
x = Castra(x, readonly=True)
elif not x._readonly:
x = Castra(x.path, readonly=True)
if columns is None:
columns = x.columns
name = 'from-castra-' + tokenize(os.path.getmtime(x.path), x.path,
columns, index)
dsk = dict(((name, i), (load_castra_partition, x, part, columns, index))
for i, part in enumerate(x.partitions))
return Bag(dsk, name, len(x.partitions))
def load_castra_partition(castra, part, columns, index):
import blosc
# Due to serialization issues, blosc needs to be manually initialized in
# each process.
blosc.init()
df = castra.load_partition(part, columns)
if isinstance(columns, list):
items = df.itertuples(index)
else:
items = df.iteritems() if index else iter(df)
items = list(items)
if items and isinstance(items[0], tuple) and type(items[0]) is not tuple:
names = items[0]._fields
items = [dict(zip(names, item)) for item in items]
return items
def from_url(urls):
"""Create a dask.bag from a url
>>> a = from_url('http://raw.githubusercontent.com/dask/dask/master/README.rst') # doctest: +SKIP
>>> a.npartitions # doctest: +SKIP
1
>>> a.take(8) # doctest: +SKIP
('Dask\\n',
'====\\n',
'\\n',
'|Build Status| |Coverage| |Doc Status| |Gitter|\\n',
'\\n',
'Dask provides multi-core execution on larger-than-memory datasets using blocked\\n',
'algorithms and task scheduling. It maps high-level NumPy and list operations\\n',
'on large datasets on to graphs of many operations on small in-memory datasets.\\n')
>>> b = from_url(['http://github.com', 'http://google.com']) # doctest: +SKIP
>>> b.npartitions # doctest: +SKIP
2
"""
if isinstance(urls, str):
urls = [urls]
name = 'from_url-' + uuid.uuid4().hex
dsk = {}
for i, u in enumerate(urls):
dsk[(name, i)] = (list, (urlopen, u))
return Bag(dsk, name, len(urls))
def dictitems(d):
""" A pickleable version of dict.items
>>> dictitems({'x': 1})
[('x', 1)]
"""
return list(d.items())
def concat(bags):
""" Concatenate many bags together, unioning all elements
>>> import dask.bag as db
>>> a = db.from_sequence([1, 2, 3])
>>> b = db.from_sequence([4, 5, 6])
>>> c = db.concat([a, b])
>>> list(c)
[1, 2, 3, 4, 5, 6]
"""
name = 'concat-' + tokenize(*bags)
counter = itertools.count(0)
dsk = dict(((name, next(counter)), key)
for bag in bags for key in sorted(bag._keys()))
return Bag(merge(dsk, *[b.dask for b in bags]), name, len(dsk))
class StringAccessor(object):
""" String processing functions
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.lower())
['alice smith', 'bob jones', 'charlie smith']
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
>>> list(b.str.split(' '))
[['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']]
"""
def __init__(self, bag):
self._bag = bag
def __dir__(self):
return sorted(set(dir(type(self)) + dir(str)))
def _strmap(self, key, *args, **kwargs):
return self._bag.map(lambda s: getattr(s, key)(*args, **kwargs))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(str):
func = getattr(str, key)
return robust_wraps(func)(partial(self._strmap, key))
else:
raise
def match(self, pattern):
""" Filter strings by those that match a pattern
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
See Also
--------
fnmatch.fnmatch
"""
from fnmatch import fnmatch
return self._bag.filter(partial(fnmatch, pat=pattern))
def robust_wraps(wrapper):
""" A weak version of wraps that only copies doc """
def _(wrapped):
wrapped.__doc__ = wrapper.__doc__
return wrapped
return _
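# A minimal sketch (hypothetical helper, unused elsewhere): unlike ``functools.wraps``,
# ``robust_wraps`` copies only the docstring onto the wrapped function and leaves its
# name untouched.
def _robust_wraps_example():
    def documented():
        """original docstring"""
    @robust_wraps(documented)
    def wrapped():
        pass
    assert wrapped.__doc__ == documented.__doc__
    assert wrapped.__name__ == 'wrapped'
    return wrapped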
def reify(seq):
if isinstance(seq, Iterator):
seq = list(seq)
if seq and isinstance(seq[0], Iterator):
seq = list(map(list, seq))
return seq
def from_imperative(values):
warn("Deprecation warning: moved to from_delayed")
return from_delayed(values)
def from_delayed(values):
""" Create bag from many dask.delayed objects
Parameters
----------
values: list of Values
An iterable of dask.delayed.Value objects, such as come from dask.do
These comprise the individual partitions of the resulting bag
Returns
-------
Bag
Examples
--------
>>> b = from_delayed([x, y, z]) # doctest: +SKIP
"""
from dask.delayed import Value
if isinstance(values, Value):
values = [values]
dsk = merge(v.dask for v in values)
name = 'bag-from-delayed-' + tokenize(*values)
names = [(name, i) for i in range(len(values))]
values = [v.key for v in values]
dsk2 = dict(zip(names, values))
return Bag(merge(dsk, dsk2), name, len(values))
def merge_frequencies(seqs):
first, rest = seqs[0], seqs[1:]
if not rest:
return first
out = defaultdict(int)
out.update(first)
for d in rest:
for k, v in iteritems(d):
out[k] += v
return out
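# A minimal sketch (hypothetical helper, unused elsewhere): ``merge_frequencies`` sums
# the per-partition counters produced by ``frequencies``, which is how
# ``Bag.frequencies`` aggregates results across partitions.
def _merge_frequencies_example():
    merged = merge_frequencies([{'Alice': 2, 'Bob': 1}, {'Alice': 1}])
    assert dict(merged) == {'Alice': 3, 'Bob': 1}
    return merged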
def bag_range(n, npartitions):
""" Numbers from zero to n
Examples
--------
>>> import dask.bag as db
>>> b = db.range(5, npartitions=2)
>>> list(b)
[0, 1, 2, 3, 4]
"""
size = n // npartitions
name = 'range-%d-npartitions-%d' % (n, npartitions)
ijs = list(enumerate(take(npartitions, range(0, n, size))))
dsk = dict(((name, i), (reify, (range, j, min(j + size, n))))
for i, j in ijs)
if n % npartitions != 0:
i, j = ijs[-1]
dsk[(name, i)] = (reify, (range, j, n))
return Bag(dsk, name, npartitions)
def bag_zip(*bags):
""" Partition-wise bag zip
All passed bags must have the same number of partitions.
NOTE: corresponding partitions should have the same length; if they do not,
the "extra" elements from the longer partition(s) will be dropped. If you
have this case chances are that what you really need is a data alignment
mechanism like pandas's, and not a missing value filler like zip_longest.
Examples
--------
Correct usage:
>>> import dask.bag as db
>>> evens = db.from_sequence(range(0, 10, 2), partition_size=4)
>>> odds = db.from_sequence(range(1, 10, 2), partition_size=4)
>>> pairs = db.zip(evens, odds)
>>> list(pairs)
[(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]
Incorrect usage:
>>> numbers = db.range(20) # doctest: +SKIP
>>> fizz = numbers.filter(lambda n: n % 3 == 0) # doctest: +SKIP
>>> buzz = numbers.filter(lambda n: n % 5 == 0) # doctest: +SKIP
>>> fizzbuzz = db.zip(fizz, buzz) # doctest: +SKIP
    >>> list(fizzbuzz) # doctest: +SKIP
[(0, 0), (3, 5), (6, 10), (9, 15), (12, 20), (15, 25), (18, 30)]
When what you really wanted was more along the lines of:
    >>> list(fizzbuzz) # doctest: +SKIP
    [(0, 0), (3, None), (None, 5), (6, None), (None, 10), (9, None),
(12, None), (15, 15), (18, None), (None, 20), (None, 25), (None, 30)]
"""
npartitions = bags[0].npartitions
assert all(bag.npartitions == npartitions for bag in bags)
# TODO: do more checks
name = 'zip-' + tokenize(*bags)
dsk = dict(
((name, i), (reify, (zip,) + tuple((bag.name, i) for bag in bags)))
for i in range(npartitions))
bags_dsk = merge(*(bag.dask for bag in bags))
return Bag(merge(bags_dsk, dsk), name, npartitions)
def _reduce(binop, sequence, initial=no_default):
if initial is not no_default:
return reduce(binop, sequence, initial)
else:
return reduce(binop, sequence)
def make_group(k, stage):
def h(x):
return x[0] // k ** stage % k
return h
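# Worked example (comment added for clarity): with k = 4, make_group(4, stage)
# reads the base-4 digit of the hashed key at position `stage`. For a record
# x = (27, value):
#
#   make_group(4, 0)(x) == 27 // 1 % 4 == 3
#   make_group(4, 1)(x) == 27 // 4 % 4 == 2
#
# so each shuffle stage routes records by one digit of their hash.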
def groupby_tasks(b, grouper, hash=hash, max_branch=32):
max_branch = max_branch or 32
n = b.npartitions
stages = int(math.ceil(math.log(n) / math.log(max_branch)))
if stages > 1:
k = int(math.ceil(n ** (1 / stages)))
else:
k = n
groups = []
splits = []
joins = []
inputs = [tuple(digit(i, j, k) for j in range(stages))
for i in range(n)]
sinputs = set(inputs)
b2 = b.map(lambda x: (hash(grouper(x)), x))
token = tokenize(b, grouper, hash, max_branch)
start = dict((('shuffle-join-' + token, 0, inp), (b2.name, i))
for i, inp in enumerate(inputs))
for stage in range(1, stages + 1):
group = dict((('shuffle-group-' + token, stage, inp),
(groupby,
(make_group, k, stage - 1),
('shuffle-join-' + token, stage - 1, inp)))
for inp in inputs)
split = dict((('shuffle-split-' + token, stage, i, inp),
(dict.get, ('shuffle-group-' + token, stage, inp), i, {}))
for i in range(k)
for inp in inputs)
join = dict((('shuffle-join-' + token, stage, inp),
(list, (toolz.concat,
[('shuffle-split-' + token, stage, inp[stage-1],
insert(inp, stage - 1, j)) for j in range(k)
if insert(inp, stage - 1, j) in sinputs])))
for inp in inputs)
groups.append(group)
splits.append(split)
joins.append(join)
end = dict((('shuffle-' + token, i), (list, (pluck, 1, j)))
for i, j in enumerate(join))
dsk = merge(b2.dask, start, end, *(groups + splits + joins))
return type(b)(dsk, 'shuffle-' + token, n)
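# Rough sizing sketch (comment added for illustration, using the math above):
# with n = 1000 input partitions and max_branch = 32,
#
#   stages = ceil(log(1000) / log(32)) = 2
#   k      = ceil(1000 ** (1 / 2))     = 32
#
# i.e. the shuffle fans out 32 ways per stage over two stages rather than
# performing a single 1000-way all-to-all exchange.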
def groupby_disk(b, grouper, npartitions=None, blocksize=2**20):
if npartitions is None:
npartitions = b.npartitions
token = tokenize(b, grouper, npartitions, blocksize)
import partd
p = ('partd-' + token,)
try:
dsk1 = {p: (partd.Python, (partd.Snappy, partd.File()))}
except AttributeError:
dsk1 = {p: (partd.Python, partd.File())}
# Partition data on disk
name = 'groupby-part-{0}-{1}'.format(funcname(grouper), token)
dsk2 = dict(((name, i), (partition, grouper, (b.name, i),
npartitions, p, blocksize))
for i in range(b.npartitions))
# Barrier
barrier_token = 'groupby-barrier-' + token
def barrier(args):
return 0
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = 'groupby-collect-' + token
dsk4 = dict(((name, i),
(collect, grouper, i, p, barrier_token))
for i in range(npartitions))
return type(b)(merge(b.dask, dsk1, dsk2, dsk3, dsk4), name,
npartitions)
|
bsd-3-clause
|
lazywei/scikit-learn
|
sklearn/tests/test_pipeline.py
|
162
|
14875
|
"""
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
|
bsd-3-clause
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/pandas/io/msgpack/__init__.py
|
26
|
1233
|
# coding: utf-8
from collections import namedtuple
from pandas.io.msgpack.exceptions import * # noqa
from pandas.io.msgpack._version import version # noqa
class ExtType(namedtuple('ExtType', 'code data')):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
import os # noqa
from pandas.io.msgpack._packer import Packer # noqa
from pandas.io.msgpack._unpacker import unpack, unpackb, Unpacker # noqa
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
def packb(o, **kwargs):
"""
Pack object `o` and return packed bytes
See :class:`Packer` for options.
"""
return Packer(**kwargs).pack(o)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
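# Hedged usage sketch (comment added for illustration, not part of the
# original module); exact key/str handling depends on the Packer/Unpacker
# defaults:
#
#   payload = packb({b'a': 1, b'b': [1, 2, 3]})   # -> bytes
#   obj = unpackb(payload)                        # -> {b'a': 1, b'b': [1, 2, 3]}
#
# pack/unpack do the same against a file-like stream, and the
# load/loads/dump/dumps aliases mirror the json/pickle naming convention.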
|
agpl-3.0
|
subutai/htmresearch
|
projects/union_path_integration/entorhinal/plot_gaussian_convergence.py
|
4
|
15129
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Plot recognition time charts."""
from collections import defaultdict
import json
import os
from htmresearch.frameworks.location import ambiguity_index
import matplotlib.lines
import matplotlib.pyplot as plt
import numpy as np
CWD = os.path.dirname(os.path.realpath(__file__))
CHART_DIR = os.path.join(CWD, "charts")
def varyResolution_varyNumModules(inFilenames, outFilename,
resolutions=(2, 3, 4),
moduleCounts=(6, 12, 18,),
xlim=None):
if not os.path.exists(CHART_DIR):
os.makedirs(CHART_DIR)
allResults = defaultdict(lambda: defaultdict(list))
for inFilename in inFilenames:
with open(inFilename, "r") as f:
experiments = json.load(f)
for exp in experiments:
numModules = exp[0]["numModules"]
numObjects = exp[0]["numObjects"]
resolution = exp[0]["inverseReadoutResolution"]
results = []
for numSensationsStr, numOccurrences in exp[1]["convergence"].items():
if numSensationsStr == "null":
results += [np.inf] * numOccurrences
else:
results += [int(numSensationsStr)] * numOccurrences
allResults[(numModules, resolution)][numObjects] += results
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
colors = ("C0", "C1", "C2")
markers = ("o", "o", "o")
markerSizes = (2, 4, 6)
for resolution, color in zip(resolutions, colors):
for numModules, marker, markerSize in zip(moduleCounts, markers, markerSizes):
resultsByNumObjects = allResults[(numModules, resolution)]
expResults = sorted((numObjects, np.median(results))
for numObjects, results in resultsByNumObjects.iteritems())
# Results up to the final non-infinite median.
lineResults = [(numObjects, median)
for numObjects, median in expResults
if median != np.inf]
# Results excluding the final non-infinite median.
numCircleMarkers = len(lineResults)
if len(lineResults) < len(expResults):
numCircleMarkers -= 1
# Results including only the final non-infinite median.
lineEndResults = ([lineResults[-1]] if len(lineResults) < len(expResults)
else [])
ax1.plot([numObjects for numObjects, median in lineResults],
[median for numObjects, median in lineResults],
"{}-".format(marker), markevery=xrange(numCircleMarkers),
color=color, linewidth=1, markersize=markerSize)
if len(lineResults) < len(expResults):
endNumObjects, endMedian = lineEndResults[-1]
ax1.plot([endNumObjects], [endMedian], "x", color=color,
markeredgewidth=markerSize/2, markersize=markerSize*1.5)
if xlim is not None:
ax1.set_xlim(xlim[0], xlim[1])
ax1.set_ylim(0, ax1.get_ylim()[1])
ax1.set_xlabel("# learned objects")
ax1.set_ylabel("Median # sensations before recognition")
ax2.set_xlabel("Sensory ambiguity index", labelpad=8)
leg = ax1.legend(loc="upper right", title=" Readout bins per axis:",
frameon=False,
handles=[matplotlib.lines.Line2D([], [], color=color)
for color in colors],
labels=resolutions)
ax1.add_artist(leg)
leg = ax1.legend(loc="center right", title="Number of modules:",
bbox_to_anchor=(0.99, 0.6),
frameon=False,
handles=[matplotlib.lines.Line2D([], [],
marker=marker,
markersize=markerSize,
color="black")
for marker, markerSize in zip(markers, markerSizes)],
labels=moduleCounts)
locs, labels = ambiguity_index.getTotalExpectedOccurrencesTicks_2_5(
ambiguity_index.numOtherOccurrencesOfMostUniqueFeature_lowerBound80_100features_10locationsPerObject)
ax2.set_xticks(locs)
ax2.set_xticklabels(labels)
ax2.set_xlim(ax1.get_xlim())
ax2_color = 'gray'
ax2.xaxis.label.set_color(ax2_color)
ax2.tick_params(axis='x', colors=ax2_color)
plt.tight_layout()
filePath = os.path.join(CHART_DIR, outFilename)
print "Saving", filePath
plt.savefig(filePath)
def varyModuleSize_varyResolution(inFilenames, outFilename,
enlargeModuleFactors=[1.0, 2.0],
resolutions=(2, 3, 4),
xlim=None):
if not os.path.exists(CHART_DIR):
os.makedirs(CHART_DIR)
allResults = defaultdict(lambda: defaultdict(list))
for inFilename in inFilenames:
with open(inFilename, "r") as f:
experiments = json.load(f)
for exp in experiments:
enlargeModuleFactor = exp[0]["enlargeModuleFactor"]
numObjects = exp[0]["numObjects"]
resolution = exp[0]["inverseReadoutResolution"]
results = []
for numSensationsStr, numOccurrences in exp[1]["convergence"].items():
if numSensationsStr == "null":
results += [np.inf] * numOccurrences
else:
results += [int(numSensationsStr)] * numOccurrences
allResults[(enlargeModuleFactor, resolution)][numObjects] += results
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
colors = ("C0", "C1", "C2")
markers = ("o", "o", "o")
markerSizes = (2, 4, 6)
for resolution, marker, markerSize in zip(resolutions, markers, markerSizes):
for enlargeModuleFactor, color in zip(enlargeModuleFactors, colors):
resultsByNumObjects = allResults[(enlargeModuleFactor, resolution)]
expResults = sorted((numObjects, np.median(results))
for numObjects, results in resultsByNumObjects.iteritems())
# Results up to the final non-infinite median.
lineResults = [(numObjects, median)
for numObjects, median in expResults
if median != np.inf]
# Results excluding the final non-infinite median.
numCircleMarkers = len(lineResults)
if len(lineResults) < len(expResults):
numCircleMarkers -= 1
# Results including only the final non-infinite median.
lineEndResults = ([lineResults[-1]] if len(lineResults) < len(expResults)
else [])
ax1.plot([numObjects for numObjects, median in lineResults],
[median for numObjects, median in lineResults],
"{}-".format(marker), markevery=xrange(numCircleMarkers),
color=color, linewidth=1, markersize=markerSize)
if len(lineResults) < len(expResults):
endNumObjects, endMedian = lineEndResults[-1]
ax1.plot([endNumObjects], [endMedian], "x", color=color,
markeredgewidth=markerSize/2, markersize=markerSize*1.5)
if xlim is not None:
ax1.set_xlim(xlim[0], xlim[1])
ax1.set_ylim(0, ax1.get_ylim()[1])
ax1.set_xlabel("# learned objects")
ax1.set_ylabel("Median # sensations before recognition")
ax2.set_xlabel("Sensory ambiguity index", labelpad=8)
# Carefully use whitespace in title to shift the entries in the legend to
# align with the previous legend.
leg = ax1.legend(loc="upper right", title="Bump size: ",
# bbox_to_anchor=(0.98, 1.0),
frameon=False,
handles=[matplotlib.lines.Line2D([], [], color=color)
for color in colors],
labels=["$ \\sigma = \\sigma_{rat} $",
"$ \\sigma = \\sigma_{rat} / 2.0 $"])
ax1.add_artist(leg)
leg = ax1.legend(loc="center right", title="Readout bins per axis:",
bbox_to_anchor=(1.0, 0.6),
frameon=False,
handles=[matplotlib.lines.Line2D([], [],
marker=marker,
markersize=markerSize,
color="black")
for marker, markerSize in zip(markers, markerSizes)],
labels=["$ \\frac{\\sigma_{rat}}{\\sigma} * 2 $ ",
"$ \\frac{\\sigma_{rat}}{\\sigma} * 3 $ ",
"$ \\frac{\\sigma_{rat}}{\\sigma} * 4 $ "])
locs, labels = ambiguity_index.getTotalExpectedOccurrencesTicks_2_5(
ambiguity_index.numOtherOccurrencesOfMostUniqueFeature_lowerBound80_100features_10locationsPerObject)
ax2.set_xticks(locs)
ax2.set_xticklabels(labels)
ax2.set_xlim(ax1.get_xlim())
ax2_color = 'gray'
ax2.xaxis.label.set_color(ax2_color)
ax2.tick_params(axis='x', colors=ax2_color)
plt.tight_layout()
filePath = os.path.join(CHART_DIR, outFilename)
print "Saving", filePath
plt.savefig(filePath)
def varyModuleSize_varyNumModules(inFilenames, outFilename,
enlargeModuleFactors=[1.0, 2.0, 3.0],
moduleCounts=(6, 12, 18),
xlim=None):
if not os.path.exists(CHART_DIR):
os.makedirs(CHART_DIR)
allResults = defaultdict(lambda: defaultdict(list))
for inFilename in inFilenames:
with open(inFilename, "r") as f:
experiments = json.load(f)
for exp in experiments:
enlargeModuleFactor = exp[0]["enlargeModuleFactor"]
numObjects = exp[0]["numObjects"]
numModules = exp[0]["numModules"]
results = []
for numSensationsStr, numOccurrences in exp[1]["convergence"].items():
if numSensationsStr == "null":
results += [np.inf] * numOccurrences
else:
results += [int(numSensationsStr)] * numOccurrences
allResults[(enlargeModuleFactor, numModules)][numObjects] += results
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
# Optional: swap axes
# ax1.xaxis.tick_top()
# ax1.xaxis.set_label_position('top')
# ax2.xaxis.tick_bottom()
# ax2.xaxis.set_label_position('bottom')
colors = ("C0", "C1", "C2")
markers = ("o", "o", "o")
markerSizes = (2, 4, 6)
for numModules, marker, markerSize in zip(moduleCounts, markers, markerSizes):
for enlargeModuleFactor, color in zip(enlargeModuleFactors, colors):
resultsByNumObjects = allResults[(enlargeModuleFactor, numModules)]
expResults = sorted((numObjects, np.median(results))
for numObjects, results in resultsByNumObjects.iteritems())
# Results up to the final non-infinite median.
lineResults = [(numObjects, median)
for numObjects, median in expResults
if median != np.inf]
# Results excluding the final non-infinite median.
numCircleMarkers = len(lineResults)
if len(lineResults) < len(expResults):
numCircleMarkers -= 1
# Results including only the final non-infinite median.
lineEndResults = ([lineResults[-1]] if len(lineResults) < len(expResults)
else [])
ax1.plot([numObjects for numObjects, median in lineResults],
[median for numObjects, median in lineResults],
"{}-".format(marker), markevery=xrange(numCircleMarkers),
color=color, linewidth=1, markersize=markerSize)
if len(lineResults) < len(expResults):
endNumObjects, endMedian = lineEndResults[-1]
ax1.plot([endNumObjects], [endMedian], "x", color=color,
markeredgewidth=markerSize/2, markersize=markerSize*1.5)
if xlim is not None:
ax1.set_xlim(xlim[0], xlim[1])
ax1.set_ylim(0, ax1.get_ylim()[1])
ax1.set_xlabel("# learned objects")
ax1.set_ylabel("Median # sensations before recognition")
ax2.set_xlabel("Sensory ambiguity index", labelpad=8)
# Carefully use whitespace in title to shift the entries in the legend to
# align with the previous legend.
leg = ax1.legend(loc="upper right", title="Module size: ",
# bbox_to_anchor=(0.98, 1.0),
frameon=False,
handles=[matplotlib.lines.Line2D([], [], color=color)
for color in colors],
labels=["rat", "rat * 2", "rat * 3"])
ax1.add_artist(leg)
leg = ax1.legend(loc="center right", title="Number of modules:",
bbox_to_anchor=(1.0, 0.6),
frameon=False,
handles=[matplotlib.lines.Line2D([], [],
marker=marker,
markersize=markerSize,
color="black")
for marker, markerSize in zip(markers, markerSizes)],
labels=moduleCounts)
locs, labels = ambiguity_index.getTotalExpectedOccurrencesTicks_2_5(
ambiguity_index.numOtherOccurrencesOfMostUniqueFeature_lowerBound80_100features_10locationsPerObject)
ax2.set_xticks(locs)
ax2.set_xticklabels(labels)
ax2.set_xlim(ax1.get_xlim())
ax2_color = 'gray'
ax2.xaxis.label.set_color(ax2_color)
ax2.tick_params(axis='x', colors=ax2_color)
plt.tight_layout()
filePath = os.path.join(CHART_DIR, outFilename)
print "Saving", filePath
plt.savefig(filePath)
if __name__ == "__main__":
varyResolution_varyNumModules(
["results/gaussian_varyNumModules_100_feats_2_resolution.json",
"results/gaussian_varyNumModules_100_feats_3_resolution.json",
"results/gaussian_varyNumModules_100_feats_4_resolution.json"],
"convergence100_gaussian_varyResolution_varyNumModules.pdf",
xlim=(2.5, 167.5))
# varyModuleSize_varyResolution(
# [],
# "recognition_time_varyModuleSize.pdf",
# xlim=(-6.0, 400.0)
# )
varyModuleSize_varyNumModules(
["results/varyModuleSize_100_feats_1_enlarge.json",
"results/varyModuleSize_100_feats_2_enlarge.json",
"results/varyModuleSize_100_feats_3_enlarge.json"],
"convergence100_gaussian_varyModuleSize_varyNumModules.pdf",
xlim=(0, 577.0)
)
|
agpl-3.0
|
GGoussar/scikit-image
|
doc/examples/features_detection/plot_brief.py
|
32
|
1879
|
"""
=======================
BRIEF binary descriptor
=======================
This example demonstrates the BRIEF binary description algorithm.
The descriptor consists of relatively few bits and can be computed using
a set of intensity difference tests. The short binary descriptor results
in low memory footprint and very efficient matching based on the Hamming
distance metric.
BRIEF does not provide rotation-invariance. Scale-invariance can be achieved by
detecting and extracting features at different scales.
"""
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_peaks, corner_harris,
plot_matches, BRIEF)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
img1 = rgb2gray(data.astronaut())
tform = tf.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = tf.warp(img1, tform)
img3 = tf.rotate(img1, 25)
keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5)
extractor = BRIEF()
extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors
extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors
extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask]
descriptors3 = extractor.descriptors
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
fig, ax = plt.subplots(nrows=2, ncols=1)
plt.gray()
plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')
plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')
plt.show()
|
bsd-3-clause
|
nelson-liu/scikit-learn
|
sklearn/datasets/lfw.py
|
3
|
18687
|
"""Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove, rename
from os.path import join, exists, isdir
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
archive_path_temp = archive_path + ".tmp"
logger.warning("Downloading LFW data (~200MB): %s",
archive_url)
urllib.urlretrieve(archive_url, archive_path_temp)
rename(archive_path_temp, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
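    # Worked example (comment added for clarity): with the default
    # slice_=(slice(70, 195), slice(78, 172)) and resize=0.5 used by the
    # public loaders, h = (195 - 70) // 1 = 125 and w = (172 - 78) // 1 = 94,
    # then int(0.5 * 125) = 62 and int(0.5 * 94) = 47, i.e. the 62 x 47
    # faces mentioned in the docstrings below.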
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with at least
    # `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
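    # Comment added for clarity: np.unique sorts the names and np.searchsorted
    # maps each occurrence back to its index, e.g. (illustrative values)
    # person_names = ['Bob', 'Ann', 'Bob'] gives target_names = ['Ann', 'Bob']
    # and target = [1, 0, 1].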
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background.
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the
shape of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change
the shape of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
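    # Comment added for clarity: with the default 'train' split there are
    # 2200 pairs, hence 4400 decoded images, so the reshape above turns an
    # array of shape (4400, 62, 47) into (2200, 2, 62, 47).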
return pairs, target, np.array(['Different persons', 'Same person'])
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background.
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
will change the shape of the output.
pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
``subset``.
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_``,
``resize`` or ``subset`` parameters will change the shape of the
output.
target : numpy array of shape (2200,). Shape depends on ``subset``.
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
|
bsd-3-clause
|
PrashntS/scikit-learn
|
examples/mixture/plot_gmm_classifier.py
|
250
|
3918
|
"""
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
|
bsd-3-clause
|
I2Cvb/data_balancing
|
pipeline/feature-classification/classification_melanoma_random_forest.py
|
1
|
7763
|
#title           :classification_imbalanced_study.py
#description     :Melanoma classification with random forests and several
#                 data-balancing strategies (80/20 train/test splits).
#author :Guillaume Lemaitre, Mojdeh Rastgoo
#date :2016/01/19
#version :0.1
#notes :
#python_version :2.7.6
#==============================================================================
# Import the needed libraries
# Numpy library
import numpy as np
import pandas as pd
import h5py
# Joblib library
### Module used to perform parallel processing
from joblib import Parallel, delayed
### Module used to perform parallel processing
import multiprocessing
# OS library
import os
from os.path import join, isdir, isfile
# sys library
import sys
# Scikit-learn library
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import MinMaxScaler
from protoclass.classification.classification import Classify
# Initialization to the data paths
dataPath = sys.argv[1]
path_to_save = sys.argv[2]
#fread = pd.read_csv(dataPath.__add__('feature.csv'))
fread = pd.read_csv(join(dataPath, 'feature.csv'))
FeatureLists = fread.values
FeatureLists = FeatureLists[:,0]
#f= h5py.File(dataPath.__add__('PH2_Train_Test_80_20.mat'), 'r')
f = h5py.File(join(dataPath, 'PH2_Train_Test_80_20.mat'), 'r')
#CVIdx = sio.loadmat(datapath.__add__('TrainTestIndex_117_39_80.mat'))
trainIdx = np.asmatrix(f.get('trainingIdx'))
trainIdx = trainIdx.T
trainIdx = trainIdx - 1.
testIdx = np.asmatrix(f.get('testingIdx'))
testIdx = testIdx.T
testIdx = testIdx - 1.
Labels = np.asmatrix(f.get('BinaryLabels'))
Labels = Labels.T
ntree = 100
config = [{'classifier_str' : 'random-forest', 'n_estimators' : 100},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'random-over-sampling'},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'smote', 'kind_smote' : 'regular'},
#{'classifier_str' : 'random-forest', 'n_estimators' : 100,
#'balancing_criterion' : 'smote', 'kind_smote' : 'borderline1'},
#{'classifier_str' : 'random-forest', 'n_estimators' : 100,
#'balancing_criterion' : 'smote', 'kind_smote' : 'borderline2'},
#{'classifier_str' : 'random-forest', 'n_estimators' : 100,
#'balancing_criterion' : 'smote', 'kind_smote' : 'svm'},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'random-under-sampling', 'replacement' : True},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'tomek_links'},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'clustering'},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'nearmiss', 'version_nearmiss' : 1, 'size_ngh': 3},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'nearmiss', 'version_nearmiss' : 2, 'size_ngh': 3},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'nearmiss', 'version_nearmiss' : 3, 'size_ngh': 3, 'ver3_samp_ngh' : 3},
#{'classifier_str' : 'random-forest', 'n_estimators' : 100,
#'balancing_criterion' : 'cnn', 'size_ngh' : 3, 'n_seeds_S' :1},
#{'classifier_str' : 'random-forest', 'n_estimators' : 100,
#'balancing_criterion' : 'one-sided-selection', 'size_ngh' : 1, 'n_seeds_S' :1},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'ncr', 'size_ngh' : 3},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'easy-ensemble', 'n_subsets' : 10},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'balance-cascade', 'n_max_subset' : 100,
'balancing_classifier' : 'knn', 'bootstrap' : True},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'smote-enn', 'size_ngh' : 3},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'smote-tomek'}]
FeaturesIdx = np.array([[1,0,0,0,0,0], [0,1,0,0,0,0], [0,0,1,0,0,0], [0,0,0,1,0,0], [0,0,0,0,1,0], [0,0,0,0,0,1],\
[1,1,0,0,0,0], [1,0,1,0,0,0], [1,0,0,1,0,0], [0,1,1,0,0,0], [0,1,0,1,0,0], [0,0,1,1,0,0],\
[0,0,0,0,1,1], [1,1,1,1,0,0], [1,0,0,0,1,1], [0,1,0,0,1,1], [0,0,1,0,1,1], [0,0,0,1,1,1],\
[1,1,0,0,1,1], [1,0,1,0,1,1], [1,0,0,1,1,1], [0,1,1,0,1,1], [0,1,0,1,1,1], [0,0,1,1,1,1]])
#[0,1,0,0],[0,0,1,0],[0,0,0,1] , [0,0,1,1], [1,1,0,0],[1,0,1,1],[0,1,1,1],[1,1,1,1]])
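# Comment added for clarity: each row of FeaturesIdx is a six-element
# indicator that selects which feature files from feature.csv are
# concatenated column-wise for one experiment, e.g. [1, 1, 0, 0, 0, 0]
# combines the first two feature matrices before classification.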
for I in range (0, FeaturesIdx.shape[0]):
NonzeroIdx = np.ravel(np.nonzero(FeaturesIdx[I]))
FVcombined = np.empty(shape = [193, 0])
for PIdx in range (0, NonzeroIdx.shape[0]):
f= h5py.File(join(dataPath,FeatureLists[NonzeroIdx[PIdx]]), 'r')
#f = sio.loadmat(join(featurePath, FeatureLists[NonzeroIdx[PIdx]]))
FV =np.asmatrix(f.get('FV'))
FV =FV.T
FVcombined = np.append(FVcombined, FV, axis = 1)
del FV
FV = FVcombined
rocs = []
gt_labels = []
pred_labels = []
pred_probs = []
# Apply the classification for each fold
n_jobs = -5
for CV in range (0, trainIdx.shape[1]):
print 'Iteration #{}'.format(CV)
# Extract the data
### Training
train_data = FV[np.ravel(trainIdx[:,CV].astype(int)), :]
train_label = np.ravel(Labels[np.ravel(trainIdx[:,CV].astype(int))])
### Testing
test_data = FV[np.ravel(testIdx[:,CV].astype(int)), :]
test_label = np.ravel(Labels[np.ravel(testIdx[:,CV].astype(int))])
config_roc = []
config_pred_label = []
config_pred_prob = []
config_gt_label = []
for c in config:
print c
pred_label, pred_prob, roc = Classify(train_data, train_label, test_data, test_label, gs_n_jobs=n_jobs, **c)
config_roc.append(roc)
config_pred_label.append(pred_label)
config_pred_prob.append(pred_prob)
config_gt_label.append(test_label)
rocs.append(config_roc)
pred_labels.append(config_pred_label)
pred_probs.append(config_pred_prob)
gt_labels.append(config_gt_label)
# Convert the data to store to numpy data
rocs = np.array(rocs)
pred_labels = np.array(pred_labels)
pred_probs = np.array(pred_probs)
gt_labels = np.array(gt_labels)
# Reshape the array to have the first index corresponding to the
# configuration, the second index to the iteration of the k-fold
    # and the last index to the data themselves.
rocs = np.swapaxes(rocs, 0, 1)
pred_labels = np.swapaxes(pred_labels, 0, 1)
pred_probs = np.swapaxes(pred_probs, 0, 1)
gt_labels = np.swapaxes(gt_labels, 0, 1)
# Save the results somewhere
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
from os.path import basename
saving_filename = 'melanoma_imbalanced_80_20_' + str(ntree) + '_' + str(I)
saving_path = join(path_to_save, saving_filename)
np.savez(saving_path, gt_labels=gt_labels, pred_labels=pred_labels, pred_probs=pred_probs, rocs=rocs)
tosave={}
tosave['rocs'] = rocs
tosave['pred_labels'] = pred_labels
tosave['pred_probs'] = pred_probs
tosave['gt_labels'] = gt_labels
saving_path = join(path_to_save, saving_filename)
from scipy.io import savemat
savemat(saving_path, tosave)
|
mit
|
bhargav/scikit-learn
|
sklearn/tests/test_multiclass.py
|
18
|
24010
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from sklearn.utils import shuffle
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression,
SGDClassifier)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_check_classification_targets():
    # Test that check_classification_targets raises a ValueError containing the target type. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
assert_raise_message(ValueError, msg, check_classification_targets, y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_partial_fit():
    # Test that partial_fit works as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(y)))
assert_greater(np.mean(y == pred), 0.65)
    # Test when mini-batches don't contain all classes
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovr.partial_fit(iris.data[60:], iris.target[60:])
pred = ovr.predict(iris.data)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_partial_fit_predict():
X, y = shuffle(iris.data, iris.target)
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert_equal(len(ovo1.estimators_), n_classes * (n_classes - 1) / 2)
assert_greater(np.mean(y == pred1), 0.65)
assert_almost_equal(pred1, pred2)
# Test when mini-batches don't have all target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovo1.partial_fit(iris.data[60:], iris.target[60:])
pred1 = ovo1.predict(iris.data)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred1, pred2)
assert_equal(len(ovo1.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred1), 0.65)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote levels,
        # because there are only 3 distinct class pairs and thus 3 distinct
        # binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
    # Test that ties can be won by labels other than the first two
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
|
bsd-3-clause
|
jirivrany/riskflow123d-post
|
helpers/mapcon.py
|
1
|
6218
|
'''
Created on 20 Nov 2013
@author: albert
'''
import matplotlib.pyplot as plt
import numpy as np
import toptriangle
import section
from ruzne import value_set
def dump_test(data):
'''
data dump for testing / can be deleted from production
'''
with open('output.txt', 'w') as f:
for row in data:
print >> f, row
def conc_at_time(elid, ctime, dict_concentrations):
'''
get the concentration on element in given time
'''
try:
conc = dict_concentrations[str(elid)][ctime]
except KeyError:
return 0.0
else:
return conc
def get_triangle_from_node_coords(elem, nodes):
'''
get the node x,y coordinates from node structure
    node has 4 points in 3D - we need 3 of them, composing the largest triangle
'''
node_coords = [tuple(nodes[node_id][:2]) for node_id in elem[2]]
return toptriangle.get_triangle(*node_coords)
def get_surface_triangle_from_node_coords(elem, nodes, filter_out):
'''
get the node x,y coordinates from node structure
    node has 4 points in 3D - we drop the node given by filter_out and keep the other 3
'''
index = [0, 1, 2, 3]
index.remove(filter_out)
node_coords = [tuple(nodes[node_id][:2]) for node_id in elem[2]]
return [node_coords[i] for i in index]
def get_surface_triangles_from_bcd(bcd_name, slist):
"""
Read a Flow .bcd file
search for surface elements
    returns a dict of surface elements with the index of the point on the plane side
"""
elements = {}
readmode = 0
typ = 0
where = 0
height_limit = 10
with open(bcd_name, "r") as mshfile:
for line in mshfile:
line = line.strip()
if line.startswith('$'):
if line == '$BoundaryConditions':
readmode = 1
else:
readmode = 0
elif readmode:
columns = line.split()
if len(columns) > 5:
#first column is type of condition
element_id = int(columns[4])
if element_id in slist:
elements[element_id] = int(columns[5])
return elements
def get_triangle_from_cut(elem, nodes, height):
'''
get the triangle from tetrahedra cut
'''
node_coords = [tuple(nodes[node_id]) for node_id in elem[2]]
return section.triangles_from_cut(height, node_coords)
def get_triangles_section(mesh_elements, nodes, dict_concentrations, height, sim_time):
'''
transform the mesh coordinates to the list
of tuples (concentration, triangle)
only 3D elements are valid
'''
triangles = []
conc = []
for elid, elem in mesh_elements.iteritems():
if elem[0] > 2:
sub_result = get_triangle_from_cut(elem, nodes, height)
if sub_result:
if len(sub_result) == 2:
                    #then sub_result is a list of two triangles, so we extend
                    #the triangle list and append the same concentration twice
triangles.extend(sub_result)
conc.append(conc_at_time(elid, sim_time, dict_concentrations))
conc.append(conc_at_time(elid, sim_time, dict_concentrations))
if len(sub_result) == 3:
                    #then it is a single triangle and a normal append is enough
triangles.append(sub_result)
conc.append(conc_at_time(elid, sim_time, dict_concentrations))
return zip(conc, triangles)
def get_triangles_surface(mesh_elements, nodes, dict_concentrations, sim_time, bcd_file):
'''
transform the mesh coordinates to the list
of tuples (concentration, triangle)
only 3D elements are valid
'''
elements = get_surface_triangles_from_bcd(bcd_file, mesh_elements)
triangles = [ (conc_at_time(elid, sim_time, dict_concentrations),
get_surface_triangle_from_node_coords(elem, nodes, elements[elid]))
for elid, elem in mesh_elements.iteritems()
if elem[0] > 2]
return triangles
def prepare_triangulation(triangles):
'''
get the triangles and prepare pyplot data from them
'''
conc_list = []
grid = []
tri_list = []
ctr = 0
for conc, tria in triangles:
if tria:
conc_list.append(conc)
grid.extend(tria)
tri_list.append([ctr, ctr+1, ctr+2])
ctr += 3
xy = np.asarray(grid)
x_np = xy[:,0]
y_np = xy[:,1]
triangles = np.asarray(tri_list)
zfaces = np.asarray(conc_list)
return {'x_np': x_np,
'y_np' : y_np,
'triangles': triangles,
'zfaces': zfaces}
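# The dict returned above maps directly onto the plt.tripcolor call in draw_map
# below. A minimal usage sketch (argument names and file names are illustrative):
#   tri = prepare_triangulation(get_triangles_section(elements, nodes, concs, height, t))
#   draw_map(tri, {'map_file': 'mapa', 'map_format': 'svg'})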
def draw_map(triangulation, options):
'''
    get the triangle tuple (concentration, triangle) prepared before
and draw the map of triangles
options :
"map_format": "svg",
"map_file": "../../mapa"
'''
lab_x = options['xlabel'] if value_set(options, 'xlabel') else 'mesh X coord'
lab_y = options['ylabel'] if value_set(options, 'ylabel') else 'mesh Y coord'
lab_tit = options['title'] if value_set(options, 'title') else 'Map of concentrations'
plt.figure()
plt.gca().set_aspect('equal')
plt.tripcolor(triangulation['x_np'],
triangulation['y_np'],
triangulation['triangles'],
facecolors=triangulation['zfaces'],
edgecolors='k')
plt.colorbar()
plt.title(lab_tit)
plt.xlabel(lab_x)
plt.ylabel(lab_y)
plt.savefig(options["map_file"], format=options["map_format"])
def test_surface_nodes():
bcd_name = '/home/albert/data/risk_flow/riskflow/test_postproc/master/mm.bcd'
surface = '/home/albert/data/risk_flow/riskflow/test_postproc/master/surface.txt'
with open(surface) as surf:
slist = [int(x) for x in surf.readlines()]
elems = get_surface_triangles_from_bcd(bcd_name, slist)
print elems
if __name__ == '__main__':
test_surface_nodes()
|
gpl-2.0
|
CSB-IG/non-coding-NGS
|
pares_indel.py
|
1
|
2096
|
import matplotlib
matplotlib.rcParams.update({'font.size': 8})
from matplotlib import pyplot as plt
import numpy as np
from matplotlib_venn import venn2, venn3
from sample_code_file_maps import snps, indels, ethnicity_code
from sample_code_file_maps import north, centre, peninsula, admixed
from sample_code_file_maps import mayas, nahuas, tarahumaras, tepehuanos, totonacas, zapotecas
# load indels into a dictionary of sets
tarahumaras_sets = {}
for s in tarahumaras:
tarahumaras_sets[s] = set([v.strip() for v in open( "%s_indel.map" % ethnicity_code[s]).readlines()])
tepehuanos_sets = {}
for s in tepehuanos:
tepehuanos_sets[s] = set([v.strip() for v in open( "%s_indel.map" % ethnicity_code[s]).readlines()])
totonacas_sets = {}
for s in totonacas:
totonacas_sets[s] = set([v.strip() for v in open( "%s_indel.map" % ethnicity_code[s]).readlines()])
nahuas_sets = {}
for s in nahuas:
nahuas_sets[s] = set([v.strip() for v in open( "%s_indel.map" % ethnicity_code[s]).readlines()])
zapotecas_sets = {}
for s in zapotecas:
zapotecas_sets[s] = set([v.strip() for v in open( "%s_indel.map" % ethnicity_code[s]).readlines()])
mayas_sets = {}
for s in mayas:
mayas_sets[s] = set([v.strip() for v in open( "%s_indel.map" % ethnicity_code[s]).readlines()])
figure, axes = plt.subplots(6,1)
venn2([tarahumaras_sets['Tarahumara1'], tarahumaras_sets['Tarahumara2']], set_labels=('Tarahumara 1', 'Tarahumara 2'),ax=axes[0])
venn2([tepehuanos_sets['Tepehuana1'], tepehuanos_sets['Tepehuana2']], set_labels=('Tepehuana 1', 'Tepehuana 2'),ax=axes[1])
venn2([totonacas_sets['Totonaca1'], totonacas_sets['Totonaca2']], set_labels=('Totonaca 1', 'Totonaca 2'),ax=axes[2])
venn2([nahuas_sets['Nahua1'], nahuas_sets['Nahua2']], set_labels=('Nahua 1', 'Nahua 2'),ax=axes[3])
venn2([zapotecas_sets['Zapoteca1'], zapotecas_sets['Zapoteca2']], set_labels=('Zapoteca 1', 'Zapoteca 2'),ax=axes[4])
venn2([mayas_sets['Maya1'], mayas_sets['Maya2']], set_labels=('Maya 1', 'Maya 2'),ax=axes[5])
figure.set_size_inches(3,15.4)
figure.set_dpi(400)
plt.savefig('pares_indel.svg')
|
gpl-3.0
|
bkaiser94/red_cam_pipeline
|
flux_calibration.py
|
1
|
34203
|
"""
Written by JT Fuchs in July 2015
Based on the pySALT reduction routine specsens.py by S. Crawford
And reading the darned IRAF documentation
flux_calibration.py performs flux calibration on a 1D, wavelength-calibrated spectrum
To run file:
python flux_calibration.py spec_list --flux_list listflux.txt --stan_list liststandards.txt --extinct False
python flux_calibration.py GD1212.ms.fits --usemaster True
:INPUTS:
spec_list: either single *.fits file or text file containing list of files to flux calibrate.
:OPTIONS:
--flux_list: string, file containing standard star fluxes. These are typically m*.dat.
--stan_list: string, file with list of 1D standard star spectra
--usemaster: boolean, Option to use master response function instead of single star observation. Default: False
--extinct: boolean, Option to extinction correct spectra. Default: True
:OUTPUTS:
flux calibrated files (_flux is added to the filename). User will be prompted if file will overwrite existing file.
sensitivity_params.txt: File is updated every time flux_calibration.py is run. Contains information used in the flux calibration. Columns are: input observed spectrum, date/time program was run, observed standard spectrum used for calibration, flux calibration file (m*dat), pixel regions excluded in fit, order of polynomial to flux standard, width in Angstroms used for rebinning, output spectrum filename
sens_fits_DATE.txt: File for diagnostics. Columns are: wavelength, observed flux, polynomial fit, and residuals for each standard listed above. There are extra zeros at the bottom of some columns.
Each list should have the names of the stars, with blue and red exposures next to each other.
The ordering of the standard star flux files should match the order of the standard star list.
Example:
liststandard:
wtfb.LTT3218_930_blue.ms.fits
wtfb.LTT3218_930_red.ms.fits
wnb.GD50_930_blue.ms.fits
wnb.GD50_930_red.ms.fits
listflux:
mltt3218.dat
mgd50.dat
spec_list:
wnb.WD0122p0030_930_blue.ms.fits
wnb.WD0122p0030_930_red.ms.fits
wnb.WD0235p069_930_blue.ms.fits
wnb.WD0235p069_930_red.ms.fits
#####
Counting variables are fruits and vegetables.
"""
import os
import sys
import numpy as np
#import pyfits as fits
import astropy.io.fits as fits
import spectools as st
import datetime
from glob import glob
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline
import argparse
import config
#=============================================
#To help with command line interpretation
def str2bool(v):
if v.lower() in ('yes','true','t','y','1'):
return True
if v.lower() in ('no','false','f','n','0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
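# Accepted command-line values are case-insensitive, e.g.:
#   str2bool('Yes') -> True, str2bool('0') -> False,
#   anything else raises argparse.ArgumentTypeError.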
#=============================================
#These functions are to help with excluding regions from the sensitivity function
def find_nearest(array,value):
idx = (np.abs(array-value)).argmin()
return array[idx]
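# Returns the array element closest in value, e.g.:
#   find_nearest(np.array([4500., 4550., 4600.]), 4567.) returns 4550.0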
def onclick(event):
global ix,iy
ix, iy = event.xdata,event.ydata
global coords
ax.axvline(x=ix,color='k',linewidth='3')
fig.canvas.draw()
coords.append((ix,iy))
#=============================================
def flux_calibrate_now(stdlist,fluxlist,speclist,extinct_correct=False,masterresp=False):
if extinct_correct:
extinctflag = 0
else:
extinctflag = -1
if masterresp: #Use the master response function
#Read in master response function and use that.
cwd = os.getcwd()
#os.chdir('/afs/cas.unc.edu/depts/physics_astronomy/clemens/students/group/standards/response_curves/') #changedthis by adding the thing below and commenting current line
os.chdir(config.master_response_dir)
standards = sorted(glob('*resp*.npy'))
master_response_blue_in = np.load(standards[0])
master_response_blue_in_pol = np.poly1d(master_response_blue_in)
master_response_blue_out = np.load(standards[1])
master_response_blue_out_pol = np.poly1d(master_response_blue_out)
master_response_red_in = np.load(standards[2])
master_response_red_in_pol = np.poly1d(master_response_red_in)
master_response_red_out = np.load(standards[3])
master_response_red_out_pol = np.poly1d(master_response_red_out)
os.chdir(cwd)
airstd = np.ones([4])
#airstd[0] = 1.1
#For saving files correctly
stdflux = np.array(['mmaster.dat'])
#standards = np.array([masterlist])
allexcluded = [[None] for i in range(len(standards))]
orderused = np.zeros([len(standards)])
size = 0.
#Find shift for each night
#For blue setup: use mean of 4530-4590
#for red setup: use mean of 6090-6190
try:
flux_tonight_list = np.genfromtxt('response_curves.txt',dtype=str)
print 'Found response_curves.txt file.'
print flux_tonight_list
if flux_tonight_list.size == 1:
flux_tonight_list = np.array([flux_tonight_list])
for x in flux_tonight_list:
#print x
if 'blue' in x.lower():
wave_tonight, sens_tonight = np.genfromtxt(x,unpack=True)
blue_low_index = np.min(np.where(wave_tonight > 4530.))
blue_high_index = np.min(np.where(wave_tonight > 4590.))
blue_mean_tonight = np.mean(sens_tonight[blue_low_index:blue_high_index])
elif 'red' in x.lower():
wave_tonight, sens_tonight = np.genfromtxt(x,unpack=True)
red_low_index = np.min(np.where(wave_tonight > 6090.))
red_high_index = np.min(np.where(wave_tonight > 6190.))
red_mean_tonight = np.mean(sens_tonight[red_low_index:red_high_index])
except:
print 'No response_curves.txt file found.'
blue_mean_tonight = None
red_mean_tonight = None
flux_tonight_list = ['None','None']
else: #Use the standard star fluxes in the typical manner
#Read in each standard star spectrum
standards = np.genfromtxt(stdlist,dtype=str)
if standards.size ==1:
standards = np.array([standards])
stdflux = np.genfromtxt(fluxlist,dtype=str)
if stdflux.size == 1:
stdflux = np.array([stdflux]) #save stdflux explicitly as an array so you can index if only 1 element
#Check that the files are set up correctly to avoid mixing standards.
#This checks that the files in liststandard have similar characters to those in listflux and the correct order. But might break if flux file doesn't match. E.G. mcd32d9927.dat is often called CD-32_9927 in our system.
'''
onion = 0
for stanspec in standards:
quickcheck = stdflux[onion//2].lower()[1:-4] in stanspec.lower()
if not quickcheck:
print 'Check your standard star and flux files. They are mixed up.'
sys.exit()
onion += 1
'''
orderused = np.zeros([len(standards)])
senspolys = []
airstd = np.zeros([len(standards)])
allexcluded = [[None] for i in range(len(standards))]
#Calculating the sensitivity function of each standard star
cucumber = 0
for stdspecfile in standards:
print stdspecfile
#Read in the observed spectrum of the standard star
obs_spectra,airmass,exptime,dispersion = st.readspectrum(stdspecfile) #obs_spectra is an object containing opfarr,farr,sky,sigma,warr
airstd[cucumber] = airmass
#plt.clf()
#plt.plot(obs_spectra.warr,obs_spectra.opfarr)
#plt.show()
#Do the extinction correction
if extinct_correct:
print 'Extinction correcting spectra.'
plt.clf()
plt.plot(obs_spectra.warr,obs_spectra.opfarr)
obs_spectra.opfarr = st.extinction_correction(obs_spectra.warr,obs_spectra.opfarr,airmass)
plt.plot(obs_spectra.warr,obs_spectra.opfarr)
#plt.show()
#Change to the standard star directory
cwd = os.getcwd()
#os.chdir('/afs/cas.unc.edu/depts/physics_astronomy/clemens/students/group/standards')#changedthis by commenting and adding the line below
os.chdir(config.standards_dir)
#read in the standard file
placeholder = cucumber // 2
stdfile = stdflux[placeholder]
std_spectra = st.readstandard(stdfile)
os.chdir(cwd)
#plt.clf()
#plt.plot(std_spectra.warr,std_spectra.magarr,'.')
#plt.show()
#Only keep the part of the standard file that overlaps with observation.
lowwv = np.where(std_spectra.warr >= np.min(obs_spectra.warr))
lowwv = np.asarray(lowwv)
highwv = np.where(std_spectra.warr <= np.max(obs_spectra.warr))
highwv = np.asarray(highwv)
index = np.intersect1d(lowwv,highwv)
std_spectra.warr = std_spectra.warr[index]
std_spectra.magarr = std_spectra.magarr[index]
std_spectra.wbin = std_spectra.wbin[index]
#Convert from AB mag to fnu, then to fwave (ergs/s/cm2/A)
stdzp = 3.68e-20 #The absolute flux per unit frequency at an AB mag of zero
std_spectra.magarr = st.magtoflux(std_spectra.magarr,stdzp)
std_spectra.magarr = st.fnutofwave(std_spectra.warr, std_spectra.magarr)
#plt.clf()
#plt.plot(std_spectra.warr,std_spectra.magarr,'.')
#plt.show()
#np.savetxt('hz4_stan.txt',np.transpose([std_spectra.warr,std_spectra.magarr]))
#exit()
#We want to rebin the observed spectrum to match with the bins in the standard file. This makes summing up counts significantly easier.
#Set the new binning here.
print 'Starting to rebin: ',stdspecfile
low = np.rint(np.min(obs_spectra.warr)) #Rounds to nearest integer
high = np.rint(np.max(obs_spectra.warr))
size = 0.05 #size in Angstroms you want each bin
num = (high - low) / size + 1. #number of bins. Must add one to get correct number.
wavenew = np.linspace(low,high,num=num) #wavelength of each new bin
#Now do the rebinning using Ian Crossfield's rebinning package
binflux = st.resamplespec(wavenew,obs_spectra.warr,obs_spectra.opfarr,200.) #200 is the oversampling factor
print 'Done rebinning. Now summing the spectrum into new bins to match', stdfile
#plt.clf()
#plt.plot(obs_spectra.warr,obs_spectra.opfarr)
#plt.plot(wavenew,binflux)
#plt.show()
#Now sum the rebinned spectra into the same bins as the standard star file
counts = st.sum_std(std_spectra.warr,std_spectra.wbin,wavenew,binflux)
#plt.clf()
#plt.plot(std_spectra.warr,std_spectra.magarr)
#plt.plot(obs_spectra.warr,obs_spectra.opfarr,'b')
#plt.plot(std_spectra.warr,counts,'g+')
#plt.show()
#Calculate the sensitivity function
sens_function = st.sensfunc(counts,std_spectra.magarr,exptime,std_spectra.wbin,airmass)
#plt.clf()
#plt.plot(std_spectra.warr,sens_function)
#plt.show()
#sys.exit()
#Fit a low order polynomial to this function so that it is smooth.
#The sensitivity function is in units of 2.5 * log10[counts/sec/Ang / ergs/cm2/sec/Ang]
            #Choose regions to not include in the fit, first by checking whether a mask file exists, and if not, prompting for user interaction.
if 'blue' in stdspecfile.lower():
std_mask = stdfile[0:-4] + '_blue_maskasdf.dat'
if 'red' in stdspecfile.lower():
std_mask = stdfile[0:-4] + '_red_maskasdf.dat'
std_mask2 = glob(std_mask)
if len(std_mask2) == 1.:
print 'Found mask file.\n'
mask = np.ones(len(std_spectra.warr))
excluded_wave = np.genfromtxt(std_mask) #Read in wavelengths to exclude
#print excluded_wave
#print type(excluded_wave)
#Find index of each wavelength
excluded = []
for x in excluded_wave:
#print x
#print np.where(std_spectra.warr == find_nearest(std_spectra.warr,x))
pix_val = np.where(std_spectra.warr == find_nearest(std_spectra.warr,x))
excluded.append(pix_val[0][0])
#print excluded
lettuce = 0
while lettuce < len(excluded):
mask[excluded[lettuce]:excluded[lettuce+1]+1] = 0
lettuce += 2
excluded = np.array(excluded).tolist()
allexcluded[cucumber] = excluded
indices = np.where(mask !=0.)
lambdasfit = std_spectra.warr[indices]
fluxesfit = sens_function[indices]
else:
print 'No mask found. User interaction required.\n'
global ax, fig, coords
coords = []
plt.clf()
fig = plt.figure(1)
ax = fig.add_subplot(111)
ax.plot(std_spectra.warr,sens_function)
cid = fig.canvas.mpl_connect('button_press_event',onclick)
print 'Please click on both sides of regions you want to exclude. Then close the plot.'
plt.title('Click both sides of regions you want to exclude. Then close the plot.')
plt.show(1)
                #Mask out the regions you don't want to fit
                #We need to make sure left to right clicking and right to left clicking both work.
mask = np.ones(len(std_spectra.warr))
excluded = np.zeros(len(coords))
lettuce = 0
if len(coords) > 0:
while lettuce < len(coords):
x1 = np.where(std_spectra.warr == (find_nearest(std_spectra.warr,coords[lettuce][0])))
excluded[lettuce] = np.asarray(x1)
lettuce += 1
x2 = np.where(std_spectra.warr == (find_nearest(std_spectra.warr,coords[lettuce][0])))
if x2 < x1:
x1,x2 = x2,x1
mask[x1[0][0]:x2[0][0]+1] = 0 #have to add 1 here to the second index so that we exclude through that index. Most important for when we need to exclude the last point of the array.
excluded[lettuce-1] = np.asarray(x1)
excluded[lettuce] = np.asarray(x2)
lettuce += 1
excluded = np.array(excluded).tolist()
allexcluded[cucumber] = excluded
indices = np.where(mask !=0.)
lambdasfit = std_spectra.warr[indices]
fluxesfit = sens_function[indices]
#Save masked wavelengths
lambdasnotfit = std_spectra.warr[excluded]
#print lambdasnotfit
#print stdfile
if 'blue' in stdspecfile.lower():
std_mask_name = stdfile[0:-4] + '_blue_mask.dat'
if 'red' in stdspecfile.lower():
std_mask_name = stdfile[0:-4] + '_red_mask.dat'
np.savetxt(std_mask_name,np.transpose(np.array(lambdasnotfit)))
#exit()
##Move back to directory with observed spectra
#os.chdir(cwd)
#Make sure they are finite
ind1 = np.isfinite(lambdasfit) & np.isfinite(fluxesfit)
lambdasfit = lambdasfit[ind1]
fluxesfit = fluxesfit[ind1]
            print 'Fitting the sensitivity function now.'
order = 4
repeat = 'yes'
while repeat == 'yes':
p = np.polyfit(lambdasfit,fluxesfit,order)
f = np.poly1d(p)
smooth_sens = f(lambdasfit)
residual = fluxesfit - smooth_sens
plt.close()
plt.ion()
g, (ax1,ax2) = plt.subplots(2,sharex=True)
ax1.plot(lambdasfit,fluxesfit,'b+')
ax1.plot(lambdasfit,smooth_sens,'r',linewidth=2.0)
ax1.set_ylabel('Sensitivity Function')
ax2.plot(lambdasfit,residual,'k+')
ax2.set_ylabel('Residuals')
ax1.set_title('Current polynomial order: %s' % order)
g.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in g.axes[:-1]],visible=False)
plt.show()
plt.ioff()
#Save this sensitivity curve
'''
try:
temp_file = fits.open(stdspecfile)
ADCstat = temp_file[0].header['ADCSTAT']
except:
ADCstat = 'none'
pass
if 'blue' in stdspecfile.lower():
resp_name = 'senscurve_' + stdfile[1:-4] + '_' + str(np.round(airstd[cucumber],decimals=3)) + '_' + ADCstat + '_' + cwd[60:70] + '_blue.txt'
elif 'red' in stdspecfile.lower():
resp_name = 'senscurve_' + stdfile[1:-4] + '_' + str(np.round(airstd[cucumber],decimals=3)) + '_' + ADCstat + '_' + cwd[60:70] + '_red.txt'
print resp_name
#exit()
np.savetxt(resp_name,np.transpose([lambdasfit,fluxesfit]))
'''
repeat = raw_input('Do you want to try again (yes/no)? ')
if repeat == 'yes':
order = raw_input('New order for polynomial: ')
orderused[cucumber] = order
senspolys.append(f)
#Save arrays for diagnostic plots
if cucumber == 0:
                bigarray = np.zeros([len(lambdasfit), 4*len(standards)])
artichoke = 0
bigarray[0:len(lambdasfit),artichoke] = lambdasfit
bigarray[0:len(fluxesfit),artichoke+1] = fluxesfit
bigarray[0:len(smooth_sens),artichoke+2] = smooth_sens
bigarray[0:len(residual),artichoke+3] = residual
artichoke += 4
cucumber += 1
#Save fit and residuals into text file for diagnostic plotting later.
#Need to save lambdasfit,fluxesfit,smooth_sens,residual for each standard
#List of standards is found as standards
now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
with open('sens_fits_' + now + '.txt','a') as handle:
header = str(standards) + '\n Set of four columns correspond to wavelength, observed flux, polynomial fit, \n and residuals for each standard listed above. \n You will probably need to strip zeros from the bottoms of some columns.'
np.savetxt(handle,bigarray,fmt='%f',header = header)
#Outline for next steps:
#Read in both red and blue files
#compute airmass and compare to airstd
#choose best standard and flux calibrate both blue and red
#save files and write to sensitivity_params.txt
if speclist[-4:] == 'fits':
specfile = np.array([speclist])
else:
specfile = np.genfromtxt(speclist,dtype=str)
if specfile.size ==1:
specfile = np.array([specfile])
length = len(specfile)
airwd = np.zeros([length])
bean = 0
#if length == 1:
# redfile = False
#else:
# redfile = True
avocado = 0
while avocado < length:
#Read in the blue and red spectra we want to flux calibrate. Save the airmass
WD_spectra1,airmass1,exptime1,dispersion1 = st.readspectrum(specfile[avocado])
if (len(specfile) >= 1) and (avocado+1 < length):
if 'red' in specfile[avocado+1]:
redfile = True
else:
redfile = False
else:
redfile = False
if redfile:
WD_spectra2,airmass2,exptime2,dispersion2 = st.readspectrum(specfile[avocado+1])
#Extinction correct WD
if extinct_correct:
print 'Extinction correcting spectra.'
#plt.clf()
#plt.plot(WD_spectra1.warr,WD_spectra1.opfarr)
WD_spectra1.opfarr = st.extinction_correction(WD_spectra1.warr,WD_spectra1.opfarr,airmass1)
WD_spectra1.farr = st.extinction_correction(WD_spectra1.warr,WD_spectra1.farr,airmass1)
WD_spectra1.sky = st.extinction_correction(WD_spectra1.warr,WD_spectra1.sky,airmass1)
WD_spectra1.sigma = st.extinction_correction(WD_spectra1.warr,WD_spectra1.sigma,airmass1)
#plt.plot(WD_spectra1.warr,WD_spectra1.opfarr)
#plt.show()
if redfile:
#plt.clf()
#plt.plot(WD_spectra2.warr,WD_spectra2.opfarr)
WD_spectra2.opfarr = st.extinction_correction(WD_spectra2.warr,WD_spectra2.opfarr,airmass2)
WD_spectra2.farr = st.extinction_correction(WD_spectra2.warr,WD_spectra2.farr,airmass2)
WD_spectra2.sky = st.extinction_correction(WD_spectra2.warr,WD_spectra2.sky,airmass2)
WD_spectra2.sigma = st.extinction_correction(WD_spectra2.warr,WD_spectra2.sigma,airmass2)
#zaplt.plot(WD_spectra2.warr,WD_spectra2.opfarr)
#plt.show()
airwd[avocado] = airmass1
if redfile:
airwd[avocado+1] = airmass2
#Compare the airmasses to determine the best standard star
tomato = 0
while tomato < len(airstd):
if redfile:
diff = np.absolute(np.mean([airwd[avocado],airwd[avocado+1]]) - np.mean([airstd[tomato],airstd[tomato+1]]))
else:
diff = np.absolute(airwd[avocado] - airstd[tomato])
if tomato == 0:
difference = diff
choice = tomato
if diff < difference:
difference = diff
choice = tomato
tomato += 2
#To get the flux calibration, perform the following
#Flux = counts / (Exptime * dispersion * 10**(sens/2.5))
#Get the sensitivity function at the correct wavelength spacing
if masterresp:
header_temp = st.readheader(specfile[avocado])
ADCstatus = header_temp['ADCSTAT']
if ADCstatus == 'IN':
sens_wave1_unscale = master_response_blue_in_pol(WD_spectra1.warr)
blue_low_index = np.min(np.where(WD_spectra1.warr > 4530.))
blue_high_index = np.min(np.where(WD_spectra1.warr > 4590.))
blue_mean_stan = np.mean(sens_wave1_unscale[blue_low_index:blue_high_index])
if blue_mean_tonight == None:
sens_wave1 = sens_wave1_unscale
else:
sens_wave1 = sens_wave1_unscale + (blue_mean_tonight - blue_mean_stan)
choice = 0
else:
sens_wave1_unscale = master_response_blue_out_pol(WD_spectra1.warr)
blue_low_index = np.min(np.where(WD_spectra1.warr > 4530.))
blue_high_index = np.min(np.where(WD_spectra1.warr > 4590.))
blue_mean_stan = np.mean(sens_wave1_unscale[blue_low_index:blue_high_index])
if blue_mean_tonight == None:
sens_wave1 = sens_wave1_unscale
else:
sens_wave1 = sens_wave1_unscale + (blue_mean_tonight - blue_mean_stan)
choice = 1
if redfile:
header_temp = st.readheader(specfile[avocado+1])
ADCstatus = header_temp['ADCSTAT']
if ADCstatus == 'IN':
sens_wave2_unscale = master_response_red_in_pol(WD_spectra2.warr)
red_low_index = np.min(np.where(WD_spectra2.warr > 6090.))
red_high_index = np.min(np.where(WD_spectra2.warr > 6190.))
red_mean_stan = np.mean(sens_wave2_unscale[red_low_index:red_high_index])
if red_mean_tonight == None:
sens_wave2 = sens_wave2_unscale
else:
sens_wave2 = sens_wave2_unscale + (red_mean_tonight - red_mean_stan)
choice2 = 2
else:
sens_wave2_unscale = master_response_red_out_pol(WD_spectra2.warr)
red_low_index = np.min(np.where(WD_spectra2.warr > 6090.))
red_high_index = np.min(np.where(WD_spectra2.warr > 6190.))
red_mean_stan = np.mean(sens_wave2_unscale[red_low_index:red_high_index])
if red_mean_tonight == None:
sens_wave2 = sens_wave2_unscale
else:
sens_wave2 = sens_wave2_unscale + (red_mean_tonight - red_mean_stan)
choice2 = 3
else:
sens_wave1 = senspolys[choice](WD_spectra1.warr)
if redfile:
sens_wave2 = senspolys[choice+1](WD_spectra2.warr)
#Perform the flux calibration. We do this on the optimal extraction, non-variance weighted aperture, the sky spectrum, and the sigma spectrum.
print 'Doing the final flux calibration.'
#np.savetxt('response_g60-54_extinction_2016-03-17.txt',np.transpose([WD_spectra1.warr,(exptime1 * dispersion1 * 10.**(sens_wave1/2.5))]))#,WD_spectra2.warr,(exptime2 * dispersion2 * 10.**(sens_wave2/2.5))]))
#exit()
star_opflux1 = st.cal_spec(WD_spectra1.opfarr,sens_wave1,exptime1,dispersion1)
star_flux1 = st.cal_spec(WD_spectra1.farr,sens_wave1,exptime1,dispersion1)
sky_flux1 = st.cal_spec(WD_spectra1.sky,sens_wave1,exptime1,dispersion1)
sigma_flux1 = st.cal_spec(WD_spectra1.sigma,sens_wave1,exptime1,dispersion1)
if redfile:
star_opflux2 = st.cal_spec(WD_spectra2.opfarr,sens_wave2,exptime2,dispersion2)
star_flux2 = st.cal_spec(WD_spectra2.farr,sens_wave2,exptime2,dispersion2)
sky_flux2 = st.cal_spec(WD_spectra2.sky,sens_wave2,exptime2,dispersion2)
sigma_flux2 = st.cal_spec(WD_spectra2.sigma,sens_wave2,exptime2,dispersion2)
#plt.clf()
#plt.plot(WD_spectra.warr,star_opflux)
#plt.show()
#Save final spectra if using master response
if masterresp:
if avocado == 0:
diagnostic_array = np.zeros([len(WD_spectra1.warr),2*length])
diagnostic_array[0:len(WD_spectra1.warr),bean] = WD_spectra1.warr
bean += 1
diagnostic_array[0:len(star_opflux1),bean] = star_opflux1
bean += 1
if redfile:
diagnostic_array[0:len(WD_spectra2.warr),bean] = WD_spectra2.warr
bean += 1
diagnostic_array[0:len(star_opflux2),bean] = star_opflux2
bean += 1
#if avocado == (length -1 ) or (redfile == True and avocado == (length-2)):
# print 'Saveing diagnostic file.'
# now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
# with open('flux_fits_' + now + '.txt','a') as handle:
# header = str(specfile) + '\n Each star is formatted as wavelength, flux'
# np.savetxt(handle,diagnostic_array,fmt='%.10e',header=header)
print 'Saving the final spectrum.'
#Save the flux-calibrated spectrum and update the header
header1 = st.readheader(specfile[avocado])
        header1.set('EX-FLAG',extinctflag) #Extinction correction? 0=yes, -1=no
header1.set('CA-FLAG',0) #Calibrated to flux scale? 0=yes, -1=no
header1.set('BUNIT','erg/cm2/s/A') #physical units of the array value
header1.set('STANDARD',str(standards[choice]),'Flux standard used') #flux standard used for flux-calibration
if masterresp:
header1.set('STDOFF',str(flux_tonight_list[0]),'Night offset used')
if redfile:
header2 = st.readheader(specfile[avocado+1])
            header2.set('EX-FLAG',extinctflag) #Extinction correction? 0=yes, -1=no
header2.set('CA-FLAG',0) #Calibrated to flux scale? 0=yes, -1=no
header2.set('BUNIT','erg/cm2/s/A') #physical units of the array value
if masterresp:
header2.set('STANDARD',str(standards[choice2]),'Flux standard used') #flux standard used for flux-calibration
header1.set('STDOFF',str(flux_tonight_list[1]),'Night offset used')
else:
header2.set('STANDARD',str(standards[choice+1]),'Flux standard used') #flux standard used for flux-calibration
#Set up size of new fits image
        Ni = 4 #Number of extensions
Nx1 = len(star_flux1)
if redfile:
Nx2 = len(star_flux2)
        Ny = 1 #All 1D spectra
data1 = np.empty(shape = (Ni,Ny,Nx1))
data1[0,:,:] = star_opflux1
data1[1,:,:] = star_flux1
data1[2,:,:] = sky_flux1
data1[3,:,:] = sigma_flux1
if redfile:
data2 = np.empty(shape = (Ni,Ny,Nx2))
data2[0,:,:] = star_opflux2
data2[1,:,:] = star_flux2
data2[2,:,:] = sky_flux2
data2[3,:,:] = sigma_flux2
#Add '_flux' to the end of the filename
loc1 = specfile[avocado].find('.ms.fits')
if masterresp:
newname1 = specfile[avocado][0:loc1] + '_flux_' + stdflux[0][1:-4] + '.ms.fits'
else:
newname1 = specfile[avocado][0:loc1] + '_flux_' + stdflux[choice//2][1:-4] + '.ms.fits'
clob = False
mylist = [True for f in os.listdir('.') if f == newname1]
exists = bool(mylist)
if exists:
print 'File %s already exists.' % newname1
nextstep = raw_input('Do you want to overwrite or designate a new name (overwrite/new)? ')
if nextstep == 'overwrite':
clob = True
exists = False
elif nextstep == 'new':
newname1 = raw_input('New file name: ')
exists = False
else:
exists = False
print 'Saving: ', newname1
newim1 = fits.PrimaryHDU(data=data1,header=header1)
newim1.writeto(newname1,clobber=clob)
if redfile:
loc2 = specfile[avocado+1].find('.ms.fits')
if masterresp:
newname2 = specfile[avocado+1][0:loc2] + '_flux_' + stdflux[0][1:-4] + '.ms.fits'
else:
newname2 = specfile[avocado+1][0:loc2] + '_flux_' + stdflux[choice//2][1:-4] + '.ms.fits'
clob = False
mylist = [True for f in os.listdir('.') if f == newname2]
exists = bool(mylist)
if exists:
print 'File %s already exists.' % newname2
nextstep = raw_input('Do you want to overwrite or designate a new name (overwrite/new)? ')
if nextstep == 'overwrite':
clob = True
exists = False
elif nextstep == 'new':
newname2 = raw_input('New file name: ')
exists = False
else:
exists = False
newim2 = fits.PrimaryHDU(data=data2,header=header2)
newim2.writeto(newname2,clobber=clob)
print 'Saving: ', newname2
#Finally, save all the used parameters into a file for future reference.
# specfile,current date, stdspecfile,stdfile,order,size,newname
f = open('sensitivity_params.txt','a')
now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
if masterresp:
newinfo1 = specfile[avocado] + '\t' + now + '\t' + standards[choice] + '\t' + stdflux[0] + '\t' + str(allexcluded[choice]) + '\t' + str(orderused[choice]) + '\t' + str(size) + '\t' + newname1
else:
newinfo1 = specfile[avocado] + '\t' + now + '\t' + standards[choice] + '\t' + stdflux[choice//2] + '\t' + str(allexcluded[choice]) + '\t' + str(orderused[choice]) + '\t' + str(size) + '\t' + newname1
if redfile:
if masterresp:
newinfo2 = specfile[avocado+1] + '\t' + now + '\t' + standards[choice2] + '\t' + stdflux[0] + '\t' + str(allexcluded[choice+1]) + '\t' + str(orderused[choice+1]) + '\t' + str(size) + '\t' + newname2
else:
newinfo2 = specfile[avocado+1] + '\t' + now + '\t' + standards[choice+1] + '\t' + stdflux[choice//2] + '\t' + str(allexcluded[choice+1]) + '\t' + str(orderused[choice+1]) + '\t' + str(size) + '\t' + newname2
f.write(newinfo1 + "\n" + newinfo2 + "\n")
else:
f.write(newinfo1 + "\n")
f.close()
if redfile:
avocado += 2
else:
avocado += 1
print 'Done flux calibrating the spectra.'
#Run from command line
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('spec_list')
parser.add_argument('--flux_list',default=None)
parser.add_argument('--stan_list',default=None)
    parser.add_argument('--usemaster',type=str2bool,nargs='?',const=False,default=False,help='Use master response function instead of single star observation.')
    parser.add_argument('--extinct',type=str2bool,nargs='?',const=True,default=True,help='Extinction correct spectra.')
args = parser.parse_args()
#print args.stand_list
flux_calibrate_now(args.stan_list,args.flux_list,args.spec_list,extinct_correct=args.extinct,masterresp=args.usemaster)
|
mit
|
msincenselee/vnpy
|
setup.py
|
1
|
7560
|
"""
vn.py - By Traders, For Traders.
The vn.py project is an open-source quantitative trading framework
that is developed by traders, for traders.
The project is mainly written in Python and uses C++ for low-layer
and performance sensitive infrastructure.
Using the vn.py project, institutional investors and professional
traders, such as hedge funds, prop trading firms and investment banks,
can easily develop complex trading strategies with the Event Engine
Strategy Module, and automatically route their orders to the most
desired destinations, including equity, commodity, forex and many
other financial markets.
"""
import ast
import os
import platform
import re
import sys
from setuptools import Extension, find_packages, setup
def gather_autocxxpy_generated_files(root: str):
fs = [os.path.join(root, "module.cpp")]
for root, dirs, filenames in os.walk(root):
for filename in filenames:
filebase, ext = os.path.splitext(filename)
if ext == ".cpp" and filebase.startswith("generated_functions_"):
path = os.path.join(root, filename)
fs.append(path)
return fs
def check_extension_build_flag(ext_modules, key: str, module: Extension):
value = os.environ.get(key, None)
if value is not None:
if value == '1':
ext_modules = list(set(ext_modules) | {module})
elif value == '0':
ext_modules = list(set(ext_modules) - {module})
else:
raise ValueError(
f"Flag {key} should be '0' or '1', but {repr(value)} got.")
return ext_modules
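# The build flags are plain environment variables ('1' adds a module, '0' removes it).
# A sketch of an assumed invocation using the flags checked further below:
#   VNPY_BUILD_CTP=1 VNPY_BUILD_OES=0 python setup.py build_ext --inplace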
def is_psycopg2_exists():
try:
import psycopg2 # noqa
return True
except ImportError:
return False
def get_install_requires():
install_requires = [
"PyQt5",
"qdarkstyle",
"requests",
"websocket-client",
"peewee",
"pymysql",
"mongoengine",
"numpy",
"pandas",
"matplotlib",
"seaborn",
"futu-api",
"tigeropen",
"rqdatac",
"ta-lib",
"ibapi",
"deap",
"pyzmq",
"QScintilla"
]
if not is_psycopg2_exists():
install_requires.append("psycopg2-binary")
if sys.version_info.minor < 7:
install_requires.append("dataclasses")
return install_requires
def get_version_string():
global version
with open("vnpy/__init__.py", "rb") as f:
version_line = re.search(
r"__version__\s+=\s+(.*)", f.read().decode("utf-8")
).group(1)
return str(ast.literal_eval(version_line))
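# e.g. a line such as __version__ = "2.0.3" in vnpy/__init__.py (the number is
# illustrative) is matched by the regex and literal_eval'd to the string "2.0.3".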
def get_ext_modules():
if platform.uname().system == "Windows":
compiler_flags = [
"/MP", "/std:c++17", # standard
"/O2", "/Ob2", "/Oi", "/Ot", "/Oy", "/GL", # Optimization
"/bigobj", # Better compatibility
"/wd4819", # 936 code page
"/D_CRT_SECURE_NO_WARNINGS",
# suppress warning of unsafe functions like fopen, strcpy, etc
]
extra_link_args = []
runtime_library_dirs = None
else:
compiler_flags = [
"-std=c++17", # standard
"-O3", # Optimization
"-Wno-delete-incomplete", "-Wno-sign-compare",
]
extra_link_args = ["-lstdc++"]
runtime_library_dirs = ["$ORIGIN"]
vnctpmd = Extension(
"vnpy.api.ctp.vnctpmd",
[
"vnpy/api/ctp/vnctp/vnctpmd/vnctpmd.cpp",
],
include_dirs=["vnpy/api/ctp/include",
"vnpy/api/ctp/vnctp", ],
define_macros=[],
undef_macros=[],
library_dirs=["vnpy/api/ctp/libs", "vnpy/api/ctp"],
libraries=["thostmduserapi_se", "thosttraderapi_se", ],
extra_compile_args=compiler_flags,
extra_link_args=extra_link_args,
runtime_library_dirs=runtime_library_dirs,
depends=[],
language="cpp",
)
vnctptd = Extension(
"vnpy.api.ctp.vnctptd",
[
"vnpy/api/ctp/vnctp/vnctptd/vnctptd.cpp",
],
include_dirs=["vnpy/api/ctp/include",
"vnpy/api/ctp/vnctp", ],
define_macros=[],
undef_macros=[],
library_dirs=["vnpy/api/ctp/libs", "vnpy/api/ctp"],
libraries=["thostmduserapi_se", "thosttraderapi_se", ],
extra_compile_args=compiler_flags,
extra_link_args=extra_link_args,
runtime_library_dirs=runtime_library_dirs,
depends=[],
language="cpp",
)
vnoes = Extension(
name="vnpy.api.oes.vnoes",
sources=gather_autocxxpy_generated_files(
"vnpy/api/oes/vnoes/generated_files/",
),
include_dirs=["vnpy/api/oes/vnoes/include",
"vnpy/api/oes/vnoes/include/oes", ],
define_macros=[("BRIGAND_NO_BOOST_SUPPORT", "1")],
undef_macros=[],
library_dirs=["vnpy/api/oes/vnoes/libs"],
libraries=["oes_api"],
extra_compile_args=compiler_flags,
extra_link_args=extra_link_args,
runtime_library_dirs=runtime_library_dirs,
depends=[],
language="cpp",
)
if platform.system() == "Windows":
# use pre-built pyd for Windows (supports Python 3.7 only)
ext_modules = []
elif platform.system() == "Darwin":
ext_modules = []
else:
ext_modules = [vnctptd, vnctpmd, vnoes]
ext_modules = check_extension_build_flag(
ext_modules, "VNPY_BUILD_OES", vnoes)
ext_modules = check_extension_build_flag(
ext_modules, "VNPY_BUILD_CTP", vnctptd)
ext_modules = check_extension_build_flag(
ext_modules, "VNPY_BUILD_CTP", vnctpmd)
return ext_modules
parallel = os.environ.get('VNPY_BUILD_PARALLEL', None)
if parallel:
if parallel == 'auto':
parallel = os.cpu_count()
if parallel != 'no':
from ci.parallel_build_distutils import patch_distutils
patch_distutils(int(parallel))
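# Illustrative usage (assumed, not part of the original file): VNPY_BUILD_PARALLEL
# controls how many compile jobs run at once. Leaving it unset or setting it to 'no'
# keeps the default serial build, 'auto' uses every available CPU core, and an
# integer string such as '4' uses that many jobs, e.g.
#   VNPY_BUILD_PARALLEL=auto python setup.py build_ext --inplace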
setup(
name="vnpy",
version=get_version_string(),
author="vn.py team",
author_email="[email protected]",
license="MIT",
url="https://www.vnpy.com",
description="A framework for developing quant trading systems.",
long_description=__doc__,
keywords='quant quantitative investment trading algotrading',
include_package_data=True,
packages=find_packages(exclude=["tests", "ci", "tests.*"]),
package_data={"": [
"*.ico",
"*.ini",
"*.dll",
"*.so",
"*.pyd",
]},
install_requires=get_install_requires(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Operating System :: Microsoft :: Windows :: Windows 7",
"Operating System :: Microsoft :: Windows :: Windows 8",
"Operating System :: Microsoft :: Windows :: Windows 10",
"Operating System :: Microsoft :: Windows :: Windows Server 2008",
"Operating System :: Microsoft :: Windows :: Windows Server 2012",
"Operating System :: Microsoft :: Windows :: Windows Server 2012",
"Operating System :: POSIX :: Linux"
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Topic :: Office/Business :: Financial :: Investment",
"Programming Language :: Python :: Implementation :: CPython",
"License :: OSI Approved :: MIT License",
"Natural Language :: Chinese (Simplified)",
"Natural Language :: Chinese (Simplified)"
],
ext_modules=get_ext_modules(),
)
|
mit
|
llondon6/kerr_public
|
notes/ns/notebooks/batch_compare_psi4.py
|
1
|
5221
|
# coding: utf-8
# # Compare $\psi_4$ data with low- and high-level MMRDNS models across an array of cases
# ### Setup the Environment
# In[6]:
# Low-level import
from numpy import array,loadtxt,linspace,zeros,exp,ones,unwrap,angle,pi
# Setup ipython environment
get_ipython().magic(u'load_ext autoreload')
get_ipython().magic(u'autoreload 2')
# %matplotlib inline
# Import useful things from kerr
from kerr.formula.ksm2_cw import CW as cwfit
from kerr.formula.ksm2_sc import SC as scfit
from kerr.pttools import leaver_workfunction as lvrwork
from kerr import leaver,rgb
from kerr.models import mmrdns
#
from nrutils import scsearch,gwylm,gwf
# Setup plotting backend
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = 0.8
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = 12
mpl.rcParams['axes.labelsize'] = 20
from matplotlib.pyplot import *
# ### Define master plotting function
# In[19]:
def plot_comparison(ll,mm,q,T0,ax):
# Load the data
data_file_string = '/Users/book/GARREG/Spectroscopy/Ylm_Depictions/NonPrecessing/MULTI_DATA_6/T0_%i_nmax2_Mmin97ll_Mmin_r75_qref1.50__p1_17-Mar-2014gnx_/Data_Sets/HRq-series/D9_q%1.1f_a0.0_m160/DEPICTION_INFO::NODD_INPUT_ll%i_mm%i_r75.asc'%(T0,q,ll,mm)
data = loadtxt(data_file_string)
# Collect raw fit data for later convenience
rfdata = {}
for k,row in enumerate(data):
#
ll,mm,q,m1,m2,x1,x2,jf,Mf,qid,rew,imw,rewfit,imwfit,reA,imA,reAmean,imAmean,minA,maxA,T1,dT,match,rmse,reB,imB,reBmean,imBmean,minB,maxB = row
ll,mm = int(ll),int(mm)
A = reA+1j*imA
cw = rew + 1j*imw
try:
l,m,n,p = mmrdns.calc_z(qid)
except:
l,m,n,p,l2,m2,n2,p2 = mmrdns.calc_z(qid)
rfdata[(l,m,n,p)] = {}
rfdata[(l,m,n,p)]['ll'],rfdata[(l,m,n,p)]['mm'],rfdata[(l,m,n,p)]['A'],rfdata[(l,m,n,p)]['cw'] = ll,mm,A,cw
# Print the relative phase
print( angle( rfdata[(max(mm,2),mm,0,1)]['A'] * rfdata[(ll,mm,0,1)]['A'].conj() ) )
print( angle( mmrdns.Afit(2,2,0,mmrdns.q2eta(q)) * mmrdns.Afit(3,2,0,mmrdns.q2eta(q)).conj() ) )
# Define function to calculate raw fit
def rawfit(t):
y = zeros( t.shape, dtype=complex )
for k,row in enumerate(data):
#
ll,mm,q,m1,m2,x1,x2,jf,Mf,qid,rew,imw,rewfit,imwfit,reA,imA,reAmean,imAmean,minA,maxA,T1,dT,match,rmse,reB,imB,reBmean,imBmean,minB,maxB = row
ll,mm = int(ll),int(mm)
A = reA+1j*imA
cw = rew + 1j*imw
try:
l,m,n,p = mmrdns.calc_z(qid)
except:
l,m,n,p,l2,m2,n2,p2 = mmrdns.calc_z(qid)
# NOTE that the amplitudes are for Psi4 here
if True: # (l,m,n,p) in [ (2,2,0,1) ,(2,2,1,1) ] :
y += A*exp( 1j*cw*(t-T0) )
#
a = gwf( array( [t,y.real,-y.imag] ).T )
#
return a,q
_,q = rawfit( linspace(T0,50) )
#
# A = scsearch( keyword='hrq',notkeyword='athena', q=q, nonspinning=True,verbose=True )[0]
A = scsearch( keyword=['hr','athena'], q=q, nonspinning=True,verbose=True )[0]
#
imrnr = gwylm( A, lm=([ll,mm],[2,2]), verbose=True, dt=0.5 )
nr = imrnr.ringdown(T0=T0)
y,_ = rawfit( nr.lm[(ll,mm)]['psi4'].t )
#
eta = mmrdns.q2eta(q)
h = mmrdns.meval_spherical_mode(ll,mm,eta,kind='psi4',gwfout=True)(nr.ylm[0].t)
h.align(nr.lm[(ll,mm)]['psi4'],method='average-phase',mask=nr.ylm[0].t<60)
y.align(nr.lm[(ll,mm)]['psi4'],method='average-phase',mask=nr.ylm[0].t<60)
# nr.lm[(ll,mm)]['psi4'].plot()
# y.plot()
# h.plot()
#fig = figure( figsize=2*array([5,3]) )
sca(ax)
ax.set_yscale("log", nonposy='clip')
plot( nr.ylm[0].t, nr.lm[(ll,mm)]['psi4'].amp, color=0.5*ones((3,)), label=None )
plot( nr.ylm[0].t, y.amp, '--k', label=None )
plot( nr.ylm[0].t, h.amp, 'k', alpha=0.2, linewidth=6, label=None )
plot( nr.ylm[0].t, nr.lm[(ll,mm)]['psi4'].plus, color=0.5*ones((3,)), label='NR' )
plot( nr.ylm[0].t, y.plus, '--k', label='RAW-FIT' )
plot( nr.ylm[0].t, h.plus, 'k', alpha=0.2, linewidth=6,label='MMRDNS' )
# plot( nr.ylm[0].t, nr.lm[(ll,mm)]['psi4'].cross, color=0.5*ones((3,)), label='NR', alpha=0.8 )
# plot( nr.ylm[0].t, y.cross, '--k', label='RAW-FIT', alpha=0.8 )
# plot( nr.ylm[0].t, h.cross, 'k', alpha=0.1, linewidth=6, label='MMRDNS' )
ylim( [max(nr.lm[(ll,mm)]['psi4'].amp)*1e-5,1.2*max(nr.lm[(ll,mm)]['psi4'].amp)] )
xlim( [T0,150] )
xlabel(r'$(t-\mathrm{argmax}_t(\dot{h}(t)))/M$')
ylabel(r'${rM}\psi_{%i%i}$'%(ll,mm))
legend(frameon=False)
title( nr.label )
# ### Define parameter ranges
# In[22]:
T0_range = [5]
llmm_range = [ (2,2), (2,1), (3,3), (3,2), (4,4), (4,3), (5,5) ]
q_range = [ 1.2, 1.5, 2, 3.1, 4, 4.3 ]
# In[23]:
from numpy import sqrt
#
fig,axarr = subplots( len(llmm_range), len(q_range),figsize=sqrt(2*len(llmm_range)*len(q_range))*array( [5,4] ))
tight_layout(pad=4, w_pad=4, h_pad=4)
#
for T0 in T0_range:
for k,(ll,mm) in enumerate(llmm_range):
for h,q in enumerate(q_range):
#
ax = axarr[k,h]
plot_comparison(ll,mm,q,T0,ax)
savefig('compare_all_psi4.png')
# In[ ]:
|
mit
|
lazywei/scikit-learn
|
sklearn/cross_decomposition/pls_.py
|
8
|
28767
|
"""
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
## 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
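# Illustrative relationship (comment-only sketch, not part of the library): on
# centered data the mode-A NIPALS inner loop converges to the leading singular
# vectors of X'Y, i.e. the same directions that _svd_cross_product returns, up to
# sign and the iteration tolerance. For example:
#   rng = np.random.RandomState(0)
#   X = rng.randn(50, 5); X = X - X.mean(axis=0)
#   Y = rng.randn(50, 3); Y = Y - Y.mean(axis=0)
#   u_nipals, _, _ = _nipals_twoblocks_inner_loop(X, Y, norm_y_weights=True)
#   u_svd, _ = _svd_cross_product(X, Y)
#   # np.abs(u_nipals) and np.abs(u_svd) should then agree to roughly the NIPALS
#   # convergence tolerance.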
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
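# Note (descriptive, not part of the original file): with scale=True every column of
# the returned X and Y has zero mean and unit (ddof=1) standard deviation; constant
# columns become all zeros after centering, and their std entry is replaced by 1.0
# before dividing so no division by zero occurs.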
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm, constructors' parameters
allow to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterate over components.
(ii) The inner loop estimates the weights vectors. This can be done
with two algo. (a) the inner loop of the original NIPALS algo. or (b) a
SVD on residuals cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
set to True unless you do not care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
#1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
#2) Deflation (in place)
# ----------------------
# Possible memory footprint reduction may done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted to Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
set to True unless you do not care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find weights u, v that optimizes:
``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results that 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
@property
def coefs(self):
check_is_fitted(self, 'coef_')
DeprecationWarning("``coefs`` attribute has been deprecated and will be "
"removed in version 0.17. Use ``coef_`` instead")
return self.coef_
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
set to True unless you do not care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical, symmetric version of the PLS
regression, which is slightly different from CCA. It is mostly used
for modeling.
This implementation provides the same results that the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply performs an SVD on the cross-covariance matrix X'Y.
There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d with "
"X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
|
bsd-3-clause
|
jjx02230808/project0223
|
examples/model_selection/plot_roc.py
|
49
|
5041
|
"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
toobaz/pandas
|
pandas/tests/reductions/test_reductions.py
|
2
|
42361
|
from datetime import datetime, timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
NaT,
Period,
PeriodIndex,
RangeIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
isna,
timedelta_range,
to_timedelta,
)
from pandas.core import nanops
import pandas.util.testing as tm
def get_objs():
indexes = [
tm.makeBoolIndex(10, name="a"),
tm.makeIntIndex(10, name="a"),
tm.makeFloatIndex(10, name="a"),
tm.makeDateIndex(10, name="a"),
tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern"),
tm.makePeriodIndex(10, name="a"),
tm.makeStringIndex(10, name="a"),
tm.makeUnicodeIndex(10, name="a"),
]
arr = np.random.randn(10)
series = [Series(arr, index=idx, name="a") for idx in indexes]
objs = indexes + series
return objs
objs = get_objs()
class TestReductions:
@pytest.mark.parametrize("opname", ["max", "min"])
@pytest.mark.parametrize("obj", objs)
def test_ops(self, opname, obj):
result = getattr(obj, opname)()
if not isinstance(obj, PeriodIndex):
expected = getattr(obj.values, opname)()
else:
expected = pd.Period(
ordinal=getattr(obj._ndarray_values, opname)(), freq=obj.freq
)
try:
assert result == expected
except TypeError:
# comparing tz-aware series with np.array results in
# TypeError
expected = expected.astype("M8[ns]").astype("int64")
assert result.value == expected
def test_nanops(self):
# GH#7261
for opname in ["max", "min"]:
for klass in [Index, Series]:
arg_op = "arg" + opname if klass is Index else "idx" + opname
obj = klass([np.nan, 2.0])
assert getattr(obj, opname)() == 2.0
obj = klass([np.nan])
assert pd.isna(getattr(obj, opname)())
assert pd.isna(getattr(obj, opname)(skipna=False))
obj = klass([])
assert pd.isna(getattr(obj, opname)())
assert pd.isna(getattr(obj, opname)(skipna=False))
obj = klass([pd.NaT, datetime(2011, 11, 1)])
# check DatetimeIndex monotonic path
assert getattr(obj, opname)() == datetime(2011, 11, 1)
assert getattr(obj, opname)(skipna=False) is pd.NaT
assert getattr(obj, arg_op)() == 1
result = getattr(obj, arg_op)(skipna=False)
if klass is Series:
assert np.isnan(result)
else:
assert result == -1
obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
# check DatetimeIndex non-monotonic path
assert getattr(obj, opname)() == datetime(2011, 11, 1)
assert getattr(obj, opname)(skipna=False) is pd.NaT
assert getattr(obj, arg_op)() == 1
result = getattr(obj, arg_op)(skipna=False)
if klass is Series:
assert np.isnan(result)
else:
assert result == -1
for dtype in ["M8[ns]", "datetime64[ns, UTC]"]:
# cases with empty Series/DatetimeIndex
obj = klass([], dtype=dtype)
assert getattr(obj, opname)() is pd.NaT
assert getattr(obj, opname)(skipna=False) is pd.NaT
with pytest.raises(ValueError, match="empty sequence"):
getattr(obj, arg_op)()
with pytest.raises(ValueError, match="empty sequence"):
getattr(obj, arg_op)(skipna=False)
# argmin/max
obj = Index(np.arange(5, dtype="int64"))
assert obj.argmin() == 0
assert obj.argmax() == 4
obj = Index([np.nan, 1, np.nan, 2])
assert obj.argmin() == 1
assert obj.argmax() == 3
assert obj.argmin(skipna=False) == -1
assert obj.argmax(skipna=False) == -1
obj = Index([np.nan])
assert obj.argmin() == -1
assert obj.argmax() == -1
assert obj.argmin(skipna=False) == -1
assert obj.argmax(skipna=False) == -1
obj = Index([pd.NaT, datetime(2011, 11, 1), datetime(2011, 11, 2), pd.NaT])
assert obj.argmin() == 1
assert obj.argmax() == 2
assert obj.argmin(skipna=False) == -1
assert obj.argmax(skipna=False) == -1
obj = Index([pd.NaT])
assert obj.argmin() == -1
assert obj.argmax() == -1
assert obj.argmin(skipna=False) == -1
assert obj.argmax(skipna=False) == -1
@pytest.mark.parametrize("op, expected_col", [["max", "a"], ["min", "b"]])
def test_same_tz_min_max_axis_1(self, op, expected_col):
# GH 10390
df = DataFrame(
pd.date_range("2016-01-01 00:00:00", periods=3, tz="UTC"), columns=["a"]
)
df["b"] = df.a.subtract(pd.Timedelta(seconds=3600))
result = getattr(df, op)(axis=1)
expected = df[expected_col].rename(None)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["maximum", "minimum"])
def test_numpy_reduction_with_tz_aware_dtype(self, tz_aware_fixture, func):
# GH 15552
tz = tz_aware_fixture
arg = pd.to_datetime(["2019"]).tz_localize(tz)
expected = Series(arg)
result = getattr(np, func)(expected, expected)
tm.assert_series_equal(result, expected)
class TestIndexReductions:
# Note: the name TestIndexReductions indicates these tests
# were moved from a Index-specific test file, _not_ that these tests are
# intended long-term to be Index-specific
@pytest.mark.parametrize(
"start,stop,step",
[
(0, 400, 3),
(500, 0, -6),
(-10 ** 6, 10 ** 6, 4),
(10 ** 6, -10 ** 6, -4),
(0, 10, 20),
],
)
def test_max_min_range(self, start, stop, step):
# GH#17607
idx = RangeIndex(start, stop, step)
expected = idx._int64index.max()
result = idx.max()
assert result == expected
# skipna should be irrelevant since RangeIndex should never have NAs
result2 = idx.max(skipna=False)
assert result2 == expected
expected = idx._int64index.min()
result = idx.min()
assert result == expected
# skipna should be irrelevant since RangeIndex should never have NAs
result2 = idx.min(skipna=False)
assert result2 == expected
# empty
idx = RangeIndex(start, stop, -step)
assert isna(idx.max())
assert isna(idx.min())
def test_minmax_timedelta64(self):
# monotonic
idx1 = TimedeltaIndex(["1 days", "2 days", "3 days"])
assert idx1.is_monotonic
# non-monotonic
idx2 = TimedeltaIndex(["1 days", np.nan, "3 days", "NaT"])
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timedelta("1 days")
assert idx.max() == Timedelta("3 days")
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ["min", "max"]:
# Return NaT
obj = TimedeltaIndex([])
assert pd.isna(getattr(obj, op)())
obj = TimedeltaIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax_timedelta64(self):
td = timedelta_range("16815 days", "16820 days", freq="D")
assert np.min(td) == Timedelta("16815 days")
assert np.max(td) == Timedelta("16820 days")
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(td, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(td, out=0)
assert np.argmin(td) == 0
assert np.argmax(td) == 5
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.argmin(td, out=0)
with pytest.raises(ValueError, match=errmsg):
np.argmax(td, out=0)
def test_timedelta_ops(self):
# GH#4984
# make sure ops return Timedelta
s = Series(
[Timestamp("20130101") + timedelta(seconds=i * i) for i in range(10)]
)
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
assert result == expected
result = td.to_frame().mean()
assert result[0] == expected
result = td.quantile(0.1)
expected = Timedelta(np.timedelta64(2600, "ms"))
assert result == expected
result = td.median()
expected = to_timedelta("00:00:09")
assert result == expected
result = td.to_frame().median()
assert result[0] == expected
# GH#6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta("00:01:21")
assert result == expected
result = td.to_frame().sum()
assert result[0] == expected
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
assert result == expected
result = td.to_frame().std()
assert result[0] == expected
# invalid ops
for op in ["skew", "kurt", "sem", "prod"]:
msg = "reduction operation '{}' not allowed for this dtype"
with pytest.raises(TypeError, match=msg.format(op)):
getattr(td, op)()
# GH#10040
# make sure NaT is properly handled by median()
s = Series([Timestamp("2015-02-03"), Timestamp("2015-02-07")])
assert s.diff().median() == timedelta(days=4)
s = Series(
[Timestamp("2015-02-03"), Timestamp("2015-02-07"), Timestamp("2015-02-15")]
)
assert s.diff().median() == timedelta(days=6)
def test_minmax_tz(self, tz_naive_fixture):
tz = tz_naive_fixture
# monotonic
idx1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(
["2011-01-01", pd.NaT, "2011-01-03", "2011-01-02", pd.NaT], tz=tz
)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp("2011-01-01", tz=tz)
assert idx.max() == Timestamp("2011-01-03", tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
@pytest.mark.parametrize("op", ["min", "max"])
def test_minmax_nat_datetime64(self, op):
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax_integer(self):
# GH#26125
idx = Index([1, 2, 3])
expected = idx.values.max()
result = np.max(idx)
assert result == expected
expected = idx.values.min()
result = np.min(idx)
assert result == expected
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(idx, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(idx, out=0)
expected = idx.values.argmax()
result = np.argmax(idx)
assert result == expected
expected = idx.values.argmin()
result = np.argmin(idx)
assert result == expected
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.argmin(idx, out=0)
with pytest.raises(ValueError, match=errmsg):
np.argmax(idx, out=0)
def test_numpy_minmax_range(self):
# GH#26125
idx = RangeIndex(0, 10, 3)
expected = idx._int64index.max()
result = np.max(idx)
assert result == expected
expected = idx._int64index.min()
result = np.min(idx)
assert result == expected
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(idx, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(idx, out=0)
# No need to test again argmax/argmin compat since the implementation
# is the same as basic integer index
def test_numpy_minmax_datetime64(self):
dr = pd.date_range(start="2016-01-15", end="2016-01-20")
assert np.min(dr) == Timestamp("2016-01-15 00:00:00", freq="D")
assert np.max(dr) == Timestamp("2016-01-20 00:00:00", freq="D")
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(dr, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.argmin(dr, out=0)
with pytest.raises(ValueError, match=errmsg):
np.argmax(dr, out=0)
def test_minmax_period(self):
# monotonic
idx1 = pd.PeriodIndex([NaT, "2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.PeriodIndex(
["2011-01-01", NaT, "2011-01-03", "2011-01-02", NaT], freq="D"
)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == pd.Period("2011-01-01", freq="D")
assert idx.max() == pd.Period("2011-01-03", freq="D")
assert idx1.argmin() == 1
assert idx2.argmin() == 0
assert idx1.argmax() == 3
assert idx2.argmax() == 2
for op in ["min", "max"]:
# Return NaT
obj = PeriodIndex([], freq="M")
result = getattr(obj, op)()
assert result is NaT
obj = PeriodIndex([NaT], freq="M")
result = getattr(obj, op)()
assert result is NaT
obj = PeriodIndex([NaT, NaT, NaT], freq="M")
result = getattr(obj, op)()
assert result is NaT
def test_numpy_minmax_period(self):
pr = pd.period_range(start="2016-01-15", end="2016-01-20")
assert np.min(pr) == Period("2016-01-15", freq="D")
assert np.max(pr) == Period("2016-01-20", freq="D")
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(pr, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(pr, out=0)
assert np.argmin(pr) == 0
assert np.argmax(pr) == 5
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.argmin(pr, out=0)
with pytest.raises(ValueError, match=errmsg):
np.argmax(pr, out=0)
def test_min_max_categorical(self):
ci = pd.CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)
with pytest.raises(TypeError):
ci.min()
with pytest.raises(TypeError):
ci.max()
ci = pd.CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=True)
assert ci.min() == "c"
assert ci.max() == "b"
class TestSeriesReductions:
# Note: the name TestSeriesReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
def test_sum_inf(self):
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
assert np.isinf(s.sum())
arr = np.random.randn(100, 100).astype("f4")
arr[:, 2] = np.inf
with pd.option_context("mode.use_inf_as_na", True):
tm.assert_almost_equal(s.sum(), s2.sum())
res = nanops.nansum(arr, axis=1)
assert np.isinf(res).all()
@pytest.mark.parametrize("use_bottleneck", [True, False])
@pytest.mark.parametrize("method, unit", [("sum", 0.0), ("prod", 1.0)])
def test_empty(self, method, unit, use_bottleneck):
with pd.option_context("use_bottleneck", use_bottleneck):
# GH#9422 / GH#18921
# Entirely empty
s = Series([])
# NA by default
result = getattr(s, method)()
assert result == unit
# Explicit
result = getattr(s, method)(min_count=0)
assert result == unit
result = getattr(s, method)(min_count=1)
assert pd.isna(result)
# Skipna, default
result = getattr(s, method)(skipna=True)
assert result == unit
# Skipna, explicit
result = getattr(s, method)(skipna=True, min_count=0)
assert result == unit
result = getattr(s, method)(skipna=True, min_count=1)
assert pd.isna(result)
# All-NA
s = Series([np.nan])
# NA by default
result = getattr(s, method)()
assert result == unit
# Explicit
result = getattr(s, method)(min_count=0)
assert result == unit
result = getattr(s, method)(min_count=1)
assert pd.isna(result)
# Skipna, default
result = getattr(s, method)(skipna=True)
assert result == unit
# skipna, explicit
result = getattr(s, method)(skipna=True, min_count=0)
assert result == unit
result = getattr(s, method)(skipna=True, min_count=1)
assert pd.isna(result)
# Mix of valid, empty
s = Series([np.nan, 1])
# Default
result = getattr(s, method)()
assert result == 1.0
# Explicit
result = getattr(s, method)(min_count=0)
assert result == 1.0
result = getattr(s, method)(min_count=1)
assert result == 1.0
# Skipna
result = getattr(s, method)(skipna=True)
assert result == 1.0
result = getattr(s, method)(skipna=True, min_count=0)
assert result == 1.0
result = getattr(s, method)(skipna=True, min_count=1)
assert result == 1.0
# GH#844 (changed in GH#9422)
df = DataFrame(np.empty((10, 0)))
assert (getattr(df, method)(1) == unit).all()
s = pd.Series([1])
result = getattr(s, method)(min_count=2)
assert pd.isna(result)
s = pd.Series([np.nan])
result = getattr(s, method)(min_count=2)
assert pd.isna(result)
s = pd.Series([np.nan, 1])
result = getattr(s, method)(min_count=2)
assert pd.isna(result)
@pytest.mark.parametrize("method, unit", [("sum", 0.0), ("prod", 1.0)])
def test_empty_multi(self, method, unit):
s = pd.Series(
[1, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product([("a", "b"), (0, 1)]),
)
# 1 / 0 by default
result = getattr(s, method)(level=0)
expected = pd.Series([1, unit], index=["a", "b"])
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(s, method)(level=0, min_count=0)
expected = pd.Series([1, unit], index=["a", "b"])
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(s, method)(level=0, min_count=1)
expected = pd.Series([1, np.nan], index=["a", "b"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("method", ["mean", "median", "std", "var"])
def test_ops_consistency_on_empty(self, method):
# GH#7869
# consistency on empty
# float
result = getattr(Series(dtype=float), method)()
assert pd.isna(result)
# timedelta64[ns]
result = getattr(Series(dtype="m8[ns]"), method)()
assert result is pd.NaT
def test_nansum_buglet(self):
ser = Series([1.0, np.nan], index=[0, 1])
result = np.nansum(ser)
tm.assert_almost_equal(result, 1)
@pytest.mark.parametrize("use_bottleneck", [True, False])
def test_sum_overflow(self, use_bottleneck):
with pd.option_context("use_bottleneck", use_bottleneck):
# GH#6915
# overflowing on the smaller int dtypes
for dtype in ["int32", "int64"]:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
result = s.sum(skipna=False)
assert int(result) == v.sum(dtype="int64")
result = s.min(skipna=False)
assert int(result) == 0
result = s.max(skipna=False)
assert int(result) == v[-1]
for dtype in ["float32", "float64"]:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
result = s.sum(skipna=False)
assert result == v.sum(dtype=dtype)
result = s.min(skipna=False)
assert np.allclose(float(result), 0.0)
result = s.max(skipna=False)
assert np.allclose(float(result), v[-1])
def test_empty_timeseries_reductions_return_nat(self):
# covers GH#11245
for dtype in ("m8[ns]", "m8[ns]", "M8[ns]", "M8[ns, UTC]"):
assert Series([], dtype=dtype).min() is pd.NaT
assert Series([], dtype=dtype).max() is pd.NaT
assert Series([], dtype=dtype).min(skipna=False) is pd.NaT
assert Series([], dtype=dtype).max(skipna=False) is pd.NaT
def test_numpy_argmin_deprecated(self):
# See GH#16830
data = np.arange(1, 11)
s = Series(data, index=data)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# The deprecation of Series.argmin also causes a deprecation
# warning when calling np.argmin. This behavior is temporary
# until the implementation of Series.argmin is corrected.
result = np.argmin(s)
assert result == 1
with tm.assert_produces_warning(FutureWarning):
# argmin is aliased to idxmin
result = s.argmin()
assert result == 1
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argmin(s, out=data)
def test_numpy_argmax_deprecated(self):
# See GH#16830
data = np.arange(1, 11)
s = Series(data, index=data)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# The deprecation of Series.argmax also causes a deprecation
# warning when calling np.argmax. This behavior is temporary
# until the implementation of Series.argmax is corrected.
result = np.argmax(s)
assert result == 10
with tm.assert_produces_warning(FutureWarning):
# argmax is aliased to idxmax
result = s.argmax()
assert result == 10
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argmax(s, out=data)
def test_idxmin(self):
# test idxmin
# _check_stat_op approach can not be used here because of isna check.
string_series = tm.makeStringSeries().rename("series")
# add some NaNs
string_series[5:15] = np.NaN
# skipna or no
assert string_series[string_series.idxmin()] == string_series.min()
assert pd.isna(string_series.idxmin(skipna=False))
# no NaNs
nona = string_series.dropna()
assert nona[nona.idxmin()] == nona.min()
assert nona.index.values.tolist().index(nona.idxmin()) == nona.values.argmin()
# all NaNs
allna = string_series * np.nan
assert pd.isna(allna.idxmin())
# datetime64[ns]
s = Series(pd.date_range("20130102", periods=6))
result = s.idxmin()
assert result == 0
s[0] = np.nan
result = s.idxmin()
assert result == 1
def test_idxmax(self):
# test idxmax
# _check_stat_op approach can not be used here because of isna check.
string_series = tm.makeStringSeries().rename("series")
# add some NaNs
string_series[5:15] = np.NaN
# skipna or no
assert string_series[string_series.idxmax()] == string_series.max()
assert pd.isna(string_series.idxmax(skipna=False))
# no NaNs
nona = string_series.dropna()
assert nona[nona.idxmax()] == nona.max()
assert nona.index.values.tolist().index(nona.idxmax()) == nona.values.argmax()
# all NaNs
allna = string_series * np.nan
assert pd.isna(allna.idxmax())
from pandas import date_range
s = Series(date_range("20130102", periods=6))
result = s.idxmax()
assert result == 5
s[5] = np.nan
result = s.idxmax()
assert result == 4
# Float64Index
# GH#5914
s = pd.Series([1, 2, 3], [1.1, 2.1, 3.1])
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
assert result == 1.1
s = pd.Series(s.index, s.index)
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
assert result == 1.1
def test_all_any(self):
ts = tm.makeTimeSeries()
bool_series = ts > 0
assert not bool_series.all()
assert bool_series.any()
# Alternative types, with implicit 'object' dtype.
s = Series(["abc", True])
assert "abc" == s.any() # 'abc' || True => 'abc'
def test_all_any_params(self):
# Check skipna, with implicit 'object' dtype.
s1 = Series([np.nan, True])
s2 = Series([np.nan, False])
assert s1.all(skipna=False) # nan && True => True
assert s1.all(skipna=True)
assert np.isnan(s2.any(skipna=False)) # nan || False => nan
assert not s2.any(skipna=True)
# Check level.
s = pd.Series([False, False, True, True, False, True], index=[0, 0, 1, 1, 2, 2])
tm.assert_series_equal(s.all(level=0), Series([False, True, False]))
tm.assert_series_equal(s.any(level=0), Series([False, True, True]))
# bool_only is not implemented with level option.
with pytest.raises(NotImplementedError):
s.any(bool_only=True, level=0)
with pytest.raises(NotImplementedError):
s.all(bool_only=True, level=0)
# bool_only is not implemented alone.
with pytest.raises(NotImplementedError):
s.any(bool_only=True)
with pytest.raises(NotImplementedError):
s.all(bool_only=True)
def test_timedelta64_analytics(self):
# index min/max
dti = pd.date_range("2012-1-1", periods=3, freq="D")
td = Series(dti) - pd.Timestamp("20120101")
result = td.idxmin()
assert result == 0
result = td.idxmax()
assert result == 2
# GH#2982
# with NaT
td[0] = np.nan
result = td.idxmin()
assert result == 1
result = td.idxmax()
assert result == 2
# abs
s1 = Series(pd.date_range("20120101", periods=3))
s2 = Series(pd.date_range("20120102", periods=3))
expected = Series(s2 - s1)
# FIXME: don't leave commented-out code
# this fails as numpy returns timedelta64[us]
# result = np.abs(s1-s2)
# assert_frame_equal(result,expected)
result = (s1 - s2).abs()
tm.assert_series_equal(result, expected)
# max/min
result = td.max()
expected = pd.Timedelta("2 days")
assert result == expected
result = td.min()
expected = pd.Timedelta("1 days")
assert result == expected
@pytest.mark.parametrize(
"test_input,error_type",
[
(pd.Series([]), ValueError),
# For strings, or any Series with dtype 'O'
(pd.Series(["foo", "bar", "baz"]), TypeError),
(pd.Series([(1,), (2,)]), TypeError),
# For mixed data types
(pd.Series(["foo", "foo", "bar", "bar", None, np.nan, "baz"]), TypeError),
],
)
def test_assert_idxminmax_raises(self, test_input, error_type):
"""
        Cases where ``Series.idxmin`` and ``Series.idxmax`` should raise an exception
"""
with pytest.raises(error_type):
test_input.idxmin()
with pytest.raises(error_type):
test_input.idxmin(skipna=False)
with pytest.raises(error_type):
test_input.idxmax()
with pytest.raises(error_type):
test_input.idxmax(skipna=False)
def test_idxminmax_with_inf(self):
# For numeric data with NA and Inf (GH #13595)
s = pd.Series([0, -np.inf, np.inf, np.nan])
assert s.idxmin() == 1
assert np.isnan(s.idxmin(skipna=False))
assert s.idxmax() == 2
assert np.isnan(s.idxmax(skipna=False))
# Using old-style behavior that treats floating point nan, -inf, and
# +inf as missing
with pd.option_context("mode.use_inf_as_na", True):
assert s.idxmin() == 0
assert np.isnan(s.idxmin(skipna=False))
assert s.idxmax() == 0
            assert np.isnan(s.idxmax(skipna=False))
class TestDatetime64SeriesReductions:
# Note: the name TestDatetime64SeriesReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
@pytest.mark.parametrize(
"nat_ser",
[
Series([pd.NaT, pd.NaT]),
Series([pd.NaT, pd.Timedelta("nat")]),
Series([pd.Timedelta("nat"), pd.Timedelta("nat")]),
],
)
def test_minmax_nat_series(self, nat_ser):
# GH#23282
assert nat_ser.min() is pd.NaT
assert nat_ser.max() is pd.NaT
assert nat_ser.min(skipna=False) is pd.NaT
assert nat_ser.max(skipna=False) is pd.NaT
@pytest.mark.parametrize(
"nat_df",
[
pd.DataFrame([pd.NaT, pd.NaT]),
pd.DataFrame([pd.NaT, pd.Timedelta("nat")]),
pd.DataFrame([pd.Timedelta("nat"), pd.Timedelta("nat")]),
],
)
def test_minmax_nat_dataframe(self, nat_df):
# GH#23282
assert nat_df.min()[0] is pd.NaT
assert nat_df.max()[0] is pd.NaT
assert nat_df.min(skipna=False)[0] is pd.NaT
assert nat_df.max(skipna=False)[0] is pd.NaT
def test_min_max(self):
rng = pd.date_range("1/1/2000", "12/31/2000")
rng2 = rng.take(np.random.permutation(len(rng)))
the_min = rng2.min()
the_max = rng2.max()
assert isinstance(the_min, pd.Timestamp)
assert isinstance(the_max, pd.Timestamp)
assert the_min == rng[0]
assert the_max == rng[-1]
assert rng.min() == rng[0]
assert rng.max() == rng[-1]
def test_min_max_series(self):
rng = pd.date_range("1/1/2000", periods=10, freq="4h")
lvls = ["A", "A", "A", "B", "B", "B", "C", "C", "C", "C"]
df = DataFrame({"TS": rng, "V": np.random.randn(len(rng)), "L": lvls})
result = df.TS.max()
exp = pd.Timestamp(df.TS.iat[-1])
assert isinstance(result, pd.Timestamp)
assert result == exp
result = df.TS.min()
exp = pd.Timestamp(df.TS.iat[0])
assert isinstance(result, pd.Timestamp)
assert result == exp
class TestCategoricalSeriesReductions:
# Note: the name TestCategoricalSeriesReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
with pytest.raises(TypeError):
cat.min()
with pytest.raises(TypeError):
cat.max()
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Series(
Categorical(
["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True
)
)
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
cat = Series(
Categorical(
[np.nan, "b", "c", np.nan],
categories=["d", "c", "b", "a"],
ordered=True,
)
)
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == "b"
cat = Series(
Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
)
)
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == 1
def test_min_max_numeric_only(self):
# TODO deprecate numeric_only argument for Categorical and use
# skipna as well, see GH25303
cat = Series(
Categorical(["a", "b", np.nan, "a"], categories=["b", "a"], ordered=True)
)
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == "a"
_min = cat.min(numeric_only=True)
_max = cat.max(numeric_only=True)
assert _min == "b"
assert _max == "a"
_min = cat.min(numeric_only=False)
_max = cat.max(numeric_only=False)
assert np.isnan(_min)
assert _max == "a"
class TestSeriesMode:
# Note: the name TestSeriesMode indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
@pytest.mark.parametrize(
"dropna, expected",
[(True, Series([], dtype=np.float64)), (False, Series([], dtype=np.float64))],
)
def test_mode_empty(self, dropna, expected):
s = Series([], dtype=np.float64)
result = s.mode(dropna)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, data, expected",
[
(True, [1, 1, 1, 2], [1]),
(True, [1, 1, 1, 2, 3, 3, 3], [1, 3]),
(False, [1, 1, 1, 2], [1]),
(False, [1, 1, 1, 2, 3, 3, 3], [1, 3]),
],
)
@pytest.mark.parametrize(
"dt", list(np.typecodes["AllInteger"] + np.typecodes["Float"])
)
def test_mode_numerical(self, dropna, data, expected, dt):
s = Series(data, dtype=dt)
result = s.mode(dropna)
expected = Series(expected, dtype=dt)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna, expected", [(True, [1.0]), (False, [1, np.nan])])
def test_mode_numerical_nan(self, dropna, expected):
s = Series([1, 1, 2, np.nan, np.nan])
result = s.mode(dropna)
expected = Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, expected1, expected2, expected3",
[(True, ["b"], ["bar"], ["nan"]), (False, ["b"], [np.nan], ["nan"])],
)
def test_mode_str_obj(self, dropna, expected1, expected2, expected3):
# Test string and object types.
data = ["a"] * 2 + ["b"] * 3
s = Series(data, dtype="c")
result = s.mode(dropna)
expected1 = Series(expected1, dtype="c")
tm.assert_series_equal(result, expected1)
data = ["foo", "bar", "bar", np.nan, np.nan, np.nan]
s = Series(data, dtype=object)
result = s.mode(dropna)
expected2 = Series(expected2, dtype=object)
tm.assert_series_equal(result, expected2)
data = ["foo", "bar", "bar", np.nan, np.nan, np.nan]
s = Series(data, dtype=object).astype(str)
result = s.mode(dropna)
expected3 = Series(expected3, dtype=str)
tm.assert_series_equal(result, expected3)
@pytest.mark.parametrize(
"dropna, expected1, expected2",
[(True, ["foo"], ["foo"]), (False, ["foo"], [np.nan])],
)
def test_mode_mixeddtype(self, dropna, expected1, expected2):
s = Series([1, "foo", "foo"])
result = s.mode(dropna)
expected = Series(expected1)
tm.assert_series_equal(result, expected)
s = Series([1, "foo", "foo", np.nan, np.nan, np.nan])
result = s.mode(dropna)
expected = Series(expected2, dtype=object)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, expected1, expected2",
[
(
True,
["1900-05-03", "2011-01-03", "2013-01-02"],
["2011-01-03", "2013-01-02"],
),
(False, [np.nan], [np.nan, "2011-01-03", "2013-01-02"]),
],
)
def test_mode_datetime(self, dropna, expected1, expected2):
s = Series(
["2011-01-03", "2013-01-02", "1900-05-03", "nan", "nan"], dtype="M8[ns]"
)
result = s.mode(dropna)
expected1 = Series(expected1, dtype="M8[ns]")
tm.assert_series_equal(result, expected1)
s = Series(
[
"2011-01-03",
"2013-01-02",
"1900-05-03",
"2011-01-03",
"2013-01-02",
"nan",
"nan",
],
dtype="M8[ns]",
)
result = s.mode(dropna)
expected2 = Series(expected2, dtype="M8[ns]")
tm.assert_series_equal(result, expected2)
@pytest.mark.parametrize(
"dropna, expected1, expected2",
[
(True, ["-1 days", "0 days", "1 days"], ["2 min", "1 day"]),
(False, [np.nan], [np.nan, "2 min", "1 day"]),
],
)
def test_mode_timedelta(self, dropna, expected1, expected2):
# gh-5986: Test timedelta types.
s = Series(
["1 days", "-1 days", "0 days", "nan", "nan"], dtype="timedelta64[ns]"
)
result = s.mode(dropna)
expected1 = Series(expected1, dtype="timedelta64[ns]")
tm.assert_series_equal(result, expected1)
s = Series(
[
"1 day",
"1 day",
"-1 day",
"-1 day 2 min",
"2 min",
"2 min",
"nan",
"nan",
],
dtype="timedelta64[ns]",
)
result = s.mode(dropna)
expected2 = Series(expected2, dtype="timedelta64[ns]")
tm.assert_series_equal(result, expected2)
@pytest.mark.parametrize(
"dropna, expected1, expected2, expected3",
[
(
True,
Categorical([1, 2], categories=[1, 2]),
Categorical(["a"], categories=[1, "a"]),
Categorical([3, 1], categories=[3, 2, 1], ordered=True),
),
(
False,
Categorical([np.nan], categories=[1, 2]),
Categorical([np.nan, "a"], categories=[1, "a"]),
Categorical([np.nan, 3, 1], categories=[3, 2, 1], ordered=True),
),
],
)
def test_mode_category(self, dropna, expected1, expected2, expected3):
s = Series(Categorical([1, 2, np.nan, np.nan]))
result = s.mode(dropna)
expected1 = Series(expected1, dtype="category")
tm.assert_series_equal(result, expected1)
s = Series(Categorical([1, "a", "a", np.nan, np.nan]))
result = s.mode(dropna)
expected2 = Series(expected2, dtype="category")
tm.assert_series_equal(result, expected2)
s = Series(
Categorical(
[1, 1, 2, 3, 3, np.nan, np.nan], categories=[3, 2, 1], ordered=True
)
)
result = s.mode(dropna)
expected3 = Series(expected3, dtype="category")
tm.assert_series_equal(result, expected3)
@pytest.mark.parametrize(
"dropna, expected1, expected2",
[(True, [2 ** 63], [1, 2 ** 63]), (False, [2 ** 63], [1, 2 ** 63])],
)
def test_mode_intoverflow(self, dropna, expected1, expected2):
# Test for uint64 overflow.
s = Series([1, 2 ** 63, 2 ** 63], dtype=np.uint64)
result = s.mode(dropna)
expected1 = Series(expected1, dtype=np.uint64)
tm.assert_series_equal(result, expected1)
s = Series([1, 2 ** 63], dtype=np.uint64)
result = s.mode(dropna)
expected2 = Series(expected2, dtype=np.uint64)
tm.assert_series_equal(result, expected2)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
expected = Series(["foo", np.nan])
s = Series([1, "foo", "foo", np.nan, np.nan])
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = s.mode(dropna=False)
result = result.sort_values().reset_index(drop=True)
tm.assert_series_equal(result, expected)
|
bsd-3-clause
|
mfjb/scikit-learn
|
sklearn/linear_model/setup.py
|
169
|
1567
|
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
mwsmith2/traveler-db
|
scripts/enter_old_nmr_data.py
|
1
|
2167
|
#!/bin/python
import couchdb, sys, json, os, glob, time, csv
import matplotlib.pyplot as plt
import numpy as np
from uuid import uuid4
# This script creates/replaces a fake database
# Variables
db_name = 'traveler_db'
path_to_file = os.path.dirname(os.path.realpath(__file__))
def unique_filename(upload_file):
"""Take a base filename, add characters to make it more unique, and
ensure that it is a secure filename."""
filename = os.path.basename(upload_file).split('.')[0]
filename += '-' + str(uuid4().hex[-12:])
filename += os.path.splitext(upload_file)[1]
return filename
def upload_path(filename):
"""Construct the full path of the uploaded file."""
basename = os.path.basename(filename)
return os.path.normpath(path_to_file + '/../traveler/uploads/' + basename)
def make_entry(row, line_num):
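    # Illustrative (hypothetical) CSV row, not taken from the real data file:
    #   ['2014-05-01', '012/345', '1.8', '', '', '', 'yes']
    # maps to base_id='012', shell_id='345', amplitude=1.8, status='ready'
    # and asymmetric='true'.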
doc = {}
doc['type'] = 'nmr'
doc['title'] = 'NMR Probe'
doc['id'] = line_num
doc['location'] = "UW"
doc['author'] = "Rachel/Cole"
doc['date'] = row[0]
if '/' in row[1]:
doc['base_id'] = row[1].split('/')[0]
doc['shell_id'] = row[1].split('/')[1]
else:
doc['base_id'] = row[1]
doc['sn'] = doc['base_id']
if row[2] != '':
doc['amplitude'] = float(row[2])
else:
doc['amplitude'] = 0.0
if row[4] == 'BROKEN':
doc['status'] = 'broken'
elif row[5] != '':
doc['status'] = 'flagged'
else:
doc['status'] = 'ready'
doc['Q-factor'] = 0.0
if row[6].lower() == 'yes':
doc['asymmetric'] = 'true'
else:
doc['asymmetric'] = 'false'
# Need a dictionary to make plots easily replaceable when updating.
doc['plots'] = {}
doc['opt_file'] = []
doc['opt_img'] = []
doc['opt_num'] = []
# return the doc
return doc
if __name__ == '__main__':
# Connect to CouchDB.
try:
client = couchdb.Server()
except:
print "Couldn't connect to couchdb. Make sure the couchdb server is running."
sys.exit()
# Get the database.
db = client[db_name]
# Get the data.
data = csv.reader(open('nmr_data.csv', 'rU'), delimiter=',')
    # Now start adding entries into the database.
for i, line in enumerate(data):
if i == 0:
continue
doc = make_entry(line, i)
db.save(doc)
|
mit
|
kaiserroll14/301finalproject
|
main/pandas/rpy/common.py
|
14
|
9576
|
"""
Utilities that make working with rpy2 more user- and
developer-friendly.
"""
from __future__ import print_function
from distutils.version import LooseVersion
from pandas.compat import zip, range
import numpy as np
import pandas as pd
import pandas.core.common as com
import pandas.util.testing as _test
from rpy2.robjects.packages import importr
from rpy2.robjects import r
import rpy2.robjects as robj
import itertools as IT
__all__ = ['convert_robj', 'load_data', 'convert_to_r_dataframe',
'convert_to_r_matrix']
def load_data(name, package=None, convert=True):
if package:
importr(package)
r.data(name)
robj = r[name]
if convert:
return convert_robj(robj)
else:
return robj
def _rclass(obj):
"""
Return R class name for input object
"""
return r['class'](obj)[0]
def _is_null(obj):
return _rclass(obj) == 'NULL'
def _convert_list(obj):
"""
Convert named Vector to dict, factors to list
"""
try:
values = [convert_robj(x) for x in obj]
keys = r['names'](obj)
return dict(zip(keys, values))
except TypeError:
# For state.division and state.region
factors = list(r['factor'](obj))
level = list(r['levels'](obj))
result = [level[index-1] for index in factors]
return result
def _convert_array(obj):
"""
Convert Array to DataFrame
"""
def _list(item):
try:
return list(item)
except TypeError:
return []
# For iris3, HairEyeColor, UCBAdmissions, Titanic
dim = list(obj.dim)
values = np.array(list(obj))
names = r['dimnames'](obj)
try:
columns = list(r['names'](names))[::-1]
except TypeError:
columns = ['X{:d}'.format(i) for i in range(len(names))][::-1]
columns.append('value')
name_list = [(_list(x) or range(d)) for x, d in zip(names, dim)][::-1]
arr = np.array(list(IT.product(*name_list)))
arr = np.column_stack([arr,values])
df = pd.DataFrame(arr, columns=columns)
return df
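# Usage sketch (illustrative only; assumes a working R/rpy2 setup):
#   titanic = convert_robj(r['Titanic'])   # 4-d table -> long-format DataFrame with
#                                          # one row per cell plus a 'value' column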
def _convert_vector(obj):
if isinstance(obj, robj.IntVector):
return _convert_int_vector(obj)
elif isinstance(obj, robj.StrVector):
return _convert_str_vector(obj)
# Check if the vector has extra information attached to it that can be used
# as an index
try:
attributes = set(r['attributes'](obj).names)
except AttributeError:
return list(obj)
if 'names' in attributes:
return pd.Series(list(obj), index=r['names'](obj))
elif 'tsp' in attributes:
return pd.Series(list(obj), index=r['time'](obj))
elif 'labels' in attributes:
return pd.Series(list(obj), index=r['labels'](obj))
if _rclass(obj) == 'dist':
# For 'eurodist'. WARNING: This results in a DataFrame, not a Series or list.
matrix = r['as.matrix'](obj)
return convert_robj(matrix)
else:
return list(obj)
NA_INTEGER = -2147483648
def _convert_int_vector(obj):
arr = np.asarray(obj)
mask = arr == NA_INTEGER
if mask.any():
arr = arr.astype(float)
arr[mask] = np.nan
return arr
def _convert_str_vector(obj):
arr = np.asarray(obj, dtype=object)
mask = arr == robj.NA_Character
if mask.any():
arr[mask] = np.nan
return arr
def _convert_DataFrame(rdf):
columns = list(rdf.colnames)
rows = np.array(rdf.rownames)
data = {}
for i, col in enumerate(columns):
vec = rdf.rx2(i + 1)
values = _convert_vector(vec)
if isinstance(vec, robj.FactorVector):
levels = np.asarray(vec.levels)
if com.is_float_dtype(values):
mask = np.isnan(values)
                notmask = ~mask
result = np.empty(len(values), dtype=object)
result[mask] = np.nan
locs = (values[notmask] - 1).astype(np.int_)
result[notmask] = levels.take(locs)
values = result
else:
values = np.asarray(vec.levels).take(values - 1)
data[col] = values
return pd.DataFrame(data, index=_check_int(rows), columns=columns)
def _convert_Matrix(mat):
columns = mat.colnames
rows = mat.rownames
columns = None if _is_null(columns) else list(columns)
index = r['time'](mat) if _is_null(rows) else list(rows)
return pd.DataFrame(np.array(mat), index=_check_int(index),
columns=columns)
def _check_int(vec):
try:
# R observation numbers come through as strings
vec = vec.astype(int)
except Exception:
pass
return vec
_pandas_converters = [
(robj.DataFrame, _convert_DataFrame),
(robj.Matrix, _convert_Matrix),
(robj.StrVector, _convert_vector),
(robj.FloatVector, _convert_vector),
(robj.Array, _convert_array),
(robj.Vector, _convert_list),
]
_converters = [
(robj.DataFrame, lambda x: _convert_DataFrame(x).toRecords(index=False)),
(robj.Matrix, lambda x: _convert_Matrix(x).toRecords(index=False)),
(robj.IntVector, _convert_vector),
(robj.StrVector, _convert_vector),
(robj.FloatVector, _convert_vector),
(robj.Array, _convert_array),
(robj.Vector, _convert_list),
]
def convert_robj(obj, use_pandas=True):
"""
Convert rpy2 object to a pandas-friendly form
Parameters
----------
obj : rpy2 object
Returns
-------
Non-rpy data structure, mix of NumPy and pandas objects
"""
if not isinstance(obj, robj.RObjectMixin):
return obj
converters = _pandas_converters if use_pandas else _converters
for rpy_type, converter in converters:
if isinstance(obj, rpy_type):
return converter(obj)
raise TypeError('Do not know what to do with %s object' % type(obj))
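# Usage sketch (illustrative only; requires R and rpy2 to be installed):
#   iris_df = load_data('iris')            # R data.frame -> pandas DataFrame
#   letters = convert_robj(r['letters'])   # character vector -> numpy object array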
def convert_to_r_posixct(obj):
"""
Convert DatetimeIndex or np.datetime array to R POSIXct using
m8[s] format.
Parameters
----------
obj : source pandas object (one of [DatetimeIndex, np.datetime])
Returns
-------
An R POSIXct vector (rpy2.robjects.vectors.POSIXct)
"""
import time
from rpy2.rinterface import StrSexpVector
# convert m8[ns] to m8[s]
vals = robj.vectors.FloatSexpVector(obj.values.view('i8') / 1E9)
as_posixct = robj.baseenv.get('as.POSIXct')
origin = StrSexpVector([time.strftime("%Y-%m-%d",
time.gmtime(0)), ])
# We will be sending ints as UTC
tz = obj.tz.zone if hasattr(
obj, 'tz') and hasattr(obj.tz, 'zone') else 'UTC'
tz = StrSexpVector([tz])
utc_tz = StrSexpVector(['UTC'])
posixct = as_posixct(vals, origin=origin, tz=utc_tz)
posixct.do_slot_assign('tzone', tz)
return posixct
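# Usage sketch (illustrative only):
#   idx = pd.date_range('2000-01-01', periods=3, tz='UTC')
#   posixct = convert_to_r_posixct(idx)    # POSIXct vector with its 'tzone' slot set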
VECTOR_TYPES = {np.float64: robj.FloatVector,
np.float32: robj.FloatVector,
np.float: robj.FloatVector,
np.int: robj.IntVector,
np.int32: robj.IntVector,
np.int64: robj.IntVector,
np.object_: robj.StrVector,
np.str: robj.StrVector,
np.bool: robj.BoolVector}
NA_TYPES = {np.float64: robj.NA_Real,
np.float32: robj.NA_Real,
np.float: robj.NA_Real,
np.int: robj.NA_Integer,
np.int32: robj.NA_Integer,
np.int64: robj.NA_Integer,
np.object_: robj.NA_Character,
np.str: robj.NA_Character,
np.bool: robj.NA_Logical}
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
for dict_ in (VECTOR_TYPES, NA_TYPES):
dict_.update({
np.bool_: dict_[np.bool],
np.int_: dict_[np.int],
np.float_: dict_[np.float],
np.string_: dict_[np.str]
})
def convert_to_r_dataframe(df, strings_as_factors=False):
"""
    Convert a pandas DataFrame to an R data.frame.
Parameters
----------
df: The DataFrame being converted
strings_as_factors: Whether to turn strings into R factors (default: False)
Returns
-------
    An R data.frame
"""
import rpy2.rlike.container as rlc
columns = rlc.OrdDict()
# FIXME: This doesn't handle MultiIndex
for column in df:
value = df[column]
value_type = value.dtype.type
if value_type == np.datetime64:
value = convert_to_r_posixct(value)
else:
value = [item if pd.notnull(item) else NA_TYPES[value_type]
for item in value]
value = VECTOR_TYPES[value_type](value)
if not strings_as_factors:
I = robj.baseenv.get("I")
value = I(value)
columns[column] = value
r_dataframe = robj.DataFrame(columns)
del columns
r_dataframe.rownames = robj.StrVector(df.index)
return r_dataframe
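# Usage sketch (illustrative only):
#   pdf = pd.DataFrame({'x': [1.5, 2.5], 'label': ['a', 'b']}, index=['r1', 'r2'])
#   rdf = convert_to_r_dataframe(pdf)      # columns wrapped with I(), so strings stay character
#   rdf_factors = convert_to_r_dataframe(pdf, strings_as_factors=True)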
def convert_to_r_matrix(df, strings_as_factors=False):
"""
    Convert a pandas DataFrame to an R matrix.
Parameters
----------
df: The DataFrame being converted
strings_as_factors: Whether to turn strings into R factors (default: False)
Returns
-------
    An R matrix
"""
if df._is_mixed_type:
raise TypeError("Conversion to matrix only possible with non-mixed "
"type DataFrames")
r_dataframe = convert_to_r_dataframe(df, strings_as_factors)
as_matrix = robj.baseenv.get("as.matrix")
r_matrix = as_matrix(r_dataframe)
return r_matrix
if __name__ == '__main__':
pass
|
gpl-3.0
|
gclenaghan/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
226
|
3941
|
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
|
bsd-3-clause
|
mjudsp/Tsallis
|
examples/linear_model/plot_ransac.py
|
73
|
1859
|
"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
lw = 2
plt.scatter(X[inlier_mask], y[inlier_mask], color='yellowgreen', marker='.',
label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask], color='gold', marker='.',
label='Outliers')
plt.plot(line_X, line_y, color='navy', linestyle='-', linewidth=lw,
label='Linear regressor')
plt.plot(line_X, line_y_ransac, color='cornflowerblue', linestyle='-',
linewidth=lw, label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
|
bsd-3-clause
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/spyderlib/utils/introspection/rope_plugin.py
|
2
|
13334
|
# -*- coding: utf-8 -*-
#
# Copyright © 2013 The Spyder Development Team
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""
Rope introspection plugin
"""
import time
from spyderlib import dependencies
from spyderlib.baseconfig import get_conf_path, _, STDERR
from spyderlib.utils import encoding, programs
from spyderlib.py3compat import PY2
from spyderlib.utils.dochelpers import getsignaturefromtext
from spyderlib.utils import sourcecode
from spyderlib.utils.debug import log_last_error, log_dt
from spyderlib.utils.introspection.plugin_manager import (
DEBUG_EDITOR, LOG_FILENAME, IntrospectionPlugin)
try:
try:
from spyderlib import rope_patch
rope_patch.apply()
except ImportError:
# rope 0.9.2/0.9.3 is not installed
pass
import rope.base.libutils
import rope.contrib.codeassist
except ImportError:
pass
ROPE_REQVER = '>=0.9.2'
dependencies.add('rope',
_("Editor's code completion, go-to-definition and help"),
required_version=ROPE_REQVER)
#TODO: The following preferences should be customizable in the future
ROPE_PREFS = {'ignore_syntax_errors': True,
'ignore_bad_imports': True,
'soa_followed_calls': 2,
'extension_modules': [],
}
class RopePlugin(IntrospectionPlugin):
"""
    Rope-based introspection plugin
Editor's code completion, go-to-definition and help
"""
project = None
# ---- IntrospectionPlugin API --------------------------------------------
name = 'rope'
def load_plugin(self):
print("def load_plugin(self):")
"""Load the Rope introspection plugin"""
if not programs.is_module_installed('rope', ROPE_REQVER):
raise ImportError('Requires Rope %s' % ROPE_REQVER)
self.project = None
self.create_rope_project(root_path=get_conf_path())
def get_completions(self, info):
print("def get_completions(self, info):")
"""Get a list of completions using Rope"""
if self.project is None:
return
filename = info.filename
source_code = info.source_code
offset = info.position
if PY2:
filename = filename.encode('utf-8')
else:
# TODO: test if this is working without any further change in
# Python 3 with a user account containing unicode characters
pass
try:
resource = rope.base.libutils.path_to_resource(self.project,
filename)
except Exception as _error:
if DEBUG_EDITOR:
log_last_error(LOG_FILENAME, "path_to_resource: %r" % filename)
resource = None
try:
if DEBUG_EDITOR:
t0 = time.time()
proposals = rope.contrib.codeassist.code_assist(self.project,
source_code, offset, resource, maxfixes=3)
proposals = rope.contrib.codeassist.sorted_proposals(proposals)
lista = [proposal.name for proposal in proposals]
No_underscore_list = []
for i in lista:
if '_' not in i:
No_underscore_list.append(i)
if DEBUG_EDITOR:
log_dt(LOG_FILENAME, "code_assist/sorted_proposals", t0)
#remove_list = ['c()','c']
remove_list = ["GetLocationOfAndroidImage",
"GetLocationOfImage",
"GetRect",
'c()',
'c',
"Realc",
"Realr",
"Show",
"ShowAndroid",
"StartMonitorApp",
"Tap",
"TapOld",
"r",
"t",
"Application",
"Cycles",
"DisplayName",
"ExcludedWindowClassList",
"FriendNameOfButton",
"FullButtonPathName",
"ImageOfButton",
"MaxVal",
"NameOfButton",
"NameOfButtonCapitalized",
"Threshold",
"name"]
for i in remove_list:
try:
No_underscore_list.remove(i)
except Exception as e:
print(str(e))
for i in No_underscore_list:
print(i + '\n')
return No_underscore_list
except Exception as _error: # analysis:ignore
if DEBUG_EDITOR:
log_last_error(LOG_FILENAME, "get_completion_list")
def get_info(self, info):
print("def get_info(self, info):")
"""Get a formatted calltip and docstring from Rope"""
if self.project is None:
return
filename = info.filename
source_code = info.source_code
offset = info.position
if PY2:
filename = filename.encode('utf-8')
else:
#TODO: test if this is working without any further change in
# Python 3 with a user account containing unicode characters
pass
try:
resource = rope.base.libutils.path_to_resource(self.project,
filename)
except Exception as _error:
if DEBUG_EDITOR:
log_last_error(LOG_FILENAME, "path_to_resource: %r" % filename)
resource = None
try:
if DEBUG_EDITOR:
t0 = time.time()
cts = rope.contrib.codeassist.get_calltip(
self.project, source_code, offset, resource,
ignore_unknown=False, remove_self=True, maxfixes=3)
if DEBUG_EDITOR:
log_dt(LOG_FILENAME, "get_calltip", t0)
if cts is not None:
while '..' in cts:
cts = cts.replace('..', '.')
if '(.)' in cts:
cts = cts.replace('(.)', '(...)')
try:
doc_text = rope.contrib.codeassist.get_doc(self.project,
source_code, offset, resource, maxfixes=3)
if DEBUG_EDITOR:
log_dt(LOG_FILENAME, "get_doc", t0)
except Exception as _error:
doc_text = ''
if DEBUG_EDITOR:
log_last_error(LOG_FILENAME, "get_doc")
return self.handle_info(cts, doc_text, source_code, offset)
except Exception as _error: #analysis:ignore
if DEBUG_EDITOR:
log_last_error(LOG_FILENAME, "get_calltip_text")
def handle_info(self, cts, doc_text, source_code, offset):
print("def handle_info(self, cts, doc_text, source_code, offset):")
obj_fullname = ''
calltip = ''
argspec = ''
note = ''
if cts:
cts = cts.replace('.__init__', '')
parpos = cts.find('(')
if parpos:
obj_fullname = cts[:parpos]
obj_name = obj_fullname.split('.')[-1]
cts = cts.replace(obj_fullname, obj_name)
calltip = cts
if ('()' in cts) or ('(...)' in cts):
# Either inspected object has no argument, or it's
# a builtin or an extension -- in this last case
# the following attempt may succeed:
calltip = getsignaturefromtext(doc_text, obj_name)
if not obj_fullname:
obj_fullname = sourcecode.get_primary_at(source_code, offset)
if obj_fullname and not obj_fullname.startswith('self.'):
# doc_text was generated by utils.dochelpers.getdoc
if type(doc_text) is dict:
obj_fullname = doc_text['name'] or obj_fullname
argspec = doc_text['argspec']
note = doc_text['note']
doc_text = doc_text['docstring']
elif calltip:
argspec_st = calltip.find('(')
argspec = calltip[argspec_st:]
module_end = obj_fullname.rfind('.')
module = obj_fullname[:module_end]
note = 'Present in %s module' % module
return dict(name=obj_fullname, argspec=argspec, note=note,
docstring=doc_text, calltip=calltip)
def get_definition(self, info):
print("def get_definition(self, info):")
"""Find a definition location using Rope"""
if self.project is None:
return
filename = info.filename
source_code = info.source_code
offset = info.position
if PY2:
filename = filename.encode('utf-8')
else:
#TODO: test if this is working without any further change in
# Python 3 with a user account containing unicode characters
pass
try:
resource = rope.base.libutils.path_to_resource(self.project,
filename)
except Exception as _error:
if DEBUG_EDITOR:
log_last_error(LOG_FILENAME, "path_to_resource: %r" % filename)
resource = None
try:
if DEBUG_EDITOR:
t0 = time.time()
resource, lineno = rope.contrib.codeassist.get_definition_location(
self.project, source_code, offset, resource, maxfixes=3)
if DEBUG_EDITOR:
log_dt(LOG_FILENAME, "get_definition_location", t0)
if resource is not None:
filename = resource.real_path
if filename and lineno:
return filename, lineno
except Exception as _error: #analysis:ignore
if DEBUG_EDITOR:
log_last_error(LOG_FILENAME, "get_definition_location")
def validate(self):
print("def validate(self):")
"""Validate the Rope project"""
if self.project is not None:
self.project.validate(self.project.root)
def set_pref(self, key, value):
print("def set_pref(self, key, value):")
"""Set a Rope preference"""
if self.project is not None:
self.project.prefs.set(key, value)
# ---- Private API -------------------------------------------------------
def create_rope_project(self, root_path):
print("def create_rope_project(self, root_path):")
"""Create a Rope project on a desired path"""
if PY2:
root_path = encoding.to_fs_from_unicode(root_path)
else:
#TODO: test if this is working without any further change in
# Python 3 with a user account containing unicode characters
pass
try:
import rope.base.project
self.project = rope.base.project.Project(root_path, **ROPE_PREFS)
except ImportError:
            STDERR.write('project error\n')
self.project = None
if DEBUG_EDITOR:
log_last_error(LOG_FILENAME,
"create_rope_project: %r" % root_path)
except TypeError:
# Compatibility with new Mercurial API (>= 1.3).
# New versions of rope (> 0.9.2) already handle this issue
self.project = None
if DEBUG_EDITOR:
log_last_error(LOG_FILENAME,
"create_rope_project: %r" % root_path)
self.validate()
def close_rope_project(self):
print("def close_rope_project(self):")
"""Close the Rope project"""
if self.project is not None:
self.project.close()
if __name__ == '__main__':
from spyderlib.utils.introspection.plugin_manager import CodeInfo
p = RopePlugin()
p.load_plugin()
source_code = "import numpy; numpy.ones"
docs = p.get_info(CodeInfo('info', source_code, len(source_code),
__file__))
assert 'ones(' in docs['calltip'] and 'ones(' in docs['docstring']
source_code = "import numpy; n"
completions = p.get_completions(CodeInfo('completions', source_code,
len(source_code), __file__))
assert 'numpy' in completions
source_code = "import matplotlib.pyplot as plt; plt.imsave"
path, line_nr = p.get_definition(CodeInfo('definition', source_code,
len(source_code), __file__))
assert 'pyplot.py' in path
code = '''
def test(a, b):
"""Test docstring"""
pass
test(1,'''
path, line = p.get_definition(CodeInfo('definition', code, len(code),
'dummy.txt'))
assert line == 2
docs = p.get_info(CodeInfo('info', code, len(code), __file__))
assert 'Test docstring' in docs['docstring']
|
gpl-3.0
|
plissonf/scikit-learn
|
sklearn/gaussian_process/tests/test_gaussian_process.py
|
267
|
6813
|
"""
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
|
bsd-3-clause
|
juanitopereza/Monografia
|
512_150k/bolshoi_128/vff.py
|
1
|
3500
|
from ast import literal_eval
from struct import *
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
voids, sheets, fils, knots = [], [], [], []
def read_CIC_scalar(filename):
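    # The reads below appear to follow a Fortran unformatted binary layout:
    # a 38-byte text preamble, one record with the grid shape (n_x, n_y, n_z),
    # node count and cell geometry (x0, y0, z0, dx, dy, dz), then one record of
    # n_x*n_y*n_z float32 values; the extra 4-byte reads are the record-length
    # markers that frame each record.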
f = open(filename, "rb")
dumb = f.read(38)
dumb = f.read(4)
n_x = f.read(4)
n_y = f.read(4)
n_z = f.read(4)
nodes = f.read(8)
x0 = f.read(4)
y0 = f.read(4)
z0 = f.read(4)
dx = f.read(4)
dy = f.read(4)
dz = f.read(4)
dumb = f.read(4)
n_x = (unpack('i', n_x))[0]
n_y = (unpack('i', n_y))[0]
n_z = (unpack('i', n_z))[0]
nodes = (unpack('q', nodes))[0]
dx = (unpack('f', dx))[0]
dy = (unpack('f', dy))[0]
dz = (unpack('f', dz))[0]
x0 = (unpack('f', x0))[0]
y0 = (unpack('f', y0))[0]
z0 = (unpack('f', z0))[0]
print n_x, n_y, n_z, nodes, dx, dy, dz
total_nodes = n_x * n_y *n_z
dumb = f.read(4)
array_data = f.read(total_nodes*4)
dumb = f.read(4)
format_s = str(total_nodes)+'f'
array_data = unpack(format_s, array_data)
f.close()
array_data = np.array(array_data)
array_data.resize(n_z,n_y,n_x)
array_data = array_data.transpose()
return array_data
def classify(file1, file2, file3, lambda_th):
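    # For each grid cell, count how many eigenvalues exceed lambda_th:
    # 0 -> void, 1 -> sheet, 2 -> filament, 3 -> knot (the usual V-web
    # classification); the module-level lists accumulate the volume filling
    # fraction of each environment for this snapshot.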
eigenval1 = read_CIC_scalar(file1)
eigenval2 = read_CIC_scalar(file2)
eigenval3 = read_CIC_scalar(file3)
val1 = eigenval1 >= lambda_th*np.ones(np.shape(eigenval1))
val2 = eigenval2 >= lambda_th*np.ones(np.shape(eigenval2))
val3 = eigenval3 >= lambda_th*np.ones(np.shape(eigenval3))
chespirito = val1.astype(int) + val2.astype(int) + val3.astype(int)
void = np.where(chespirito == 0)
sheet = np.where(chespirito == 1)
fil = np.where(chespirito == 2)
knot = np.where(chespirito == 3)
# print void
voids.append(np.shape(void)[1]/128.0**3)
sheets.append(np.shape(sheet)[1]/128.0**3)
fils.append(np.shape(fil)[1]/128.0**3)
knots.append(np.shape(knot)[1]/128.0**3)
#filein="/store/04/bolshoi/V-web/clues/256/snap_190.CIC.s8.00.eigen_1"
#eigen_1 = read_CIC_scalar(filein)
for i in range(0,31,1):
if (i<10):
file1="/hpcfs/home/ciencias/fisica/pregrado/js.perez20/Gadget-2.0.7/512_150k/bolshoi_128/snapshot_00{0}.eigen_1".format(i)
print file1
file2="/hpcfs/home/ciencias/fisica/pregrado/js.perez20/Gadget-2.0.7/512_150k/bolshoi_128/snapshot_00{0}.eigen_2".format(i)
print file2
file3="/hpcfs/home/ciencias/fisica/pregrado/js.perez20/Gadget-2.0.7/512_150k/bolshoi_128/snapshot_00{0}.eigen_3".format(i)
print file3
classify(file1, file2, file3, 0.2)
else:
file1="/hpcfs/home/ciencias/fisica/pregrado/js.perez20/Gadget-2.0.7/512_150k/bolshoi_128/snapshot_0{0}.eigen_1".format(i)
print file1
file2="/hpcfs/home/ciencias/fisica/pregrado/js.perez20/Gadget-2.0.7/512_150k/bolshoi_128/snapshot_0{0}.eigen_2".format(i)
print file2
file3="/hpcfs/home/ciencias/fisica/pregrado/js.perez20/Gadget-2.0.7/512_150k/bolshoi_128/snapshot_0{0}.eigen_3".format(i)
print file3
classify(file1, file2, file3, 0.2)
t = range(31)
plt.plot(t,voids, 'bo', label='Voids')
plt.plot(t,sheets, 'r--', label='Sheets')
plt.plot(t,fils, 'ys', label='Filaments')
plt.plot(t,knots, 'g^', label='Knots')
plt.legend(bbox_to_anchor=(0.7,1))
plt.xlabel('snapshot #')
plt.ylabel('VFF')
plt.title('VFF')
plt.savefig('vff.pdf')
#eigen_1 = read_CIC_scalar(filein)
#print eigen_1.max(), eigen_1.min()
|
mit
|
public-ink/public-ink
|
server/appengine/lib/matplotlib/scale.py
|
4
|
17652
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from numpy import ma
from matplotlib.cbook import dedent
from matplotlib.ticker import (NullFormatter, ScalarFormatter,
LogFormatterSciNotation, LogitFormatter)
from matplotlib.ticker import (NullLocator, LogLocator, AutoLocator,
SymmetricalLogLocator, LogitLocator)
from matplotlib.transforms import Transform, IdentityTransform
from matplotlib import docstring
class ScaleBase(object):
"""
The base class for all scales.
Scales are separable transformations, working on a single dimension.
Any subclasses will want to override:
- :attr:`name`
- :meth:`get_transform`
- :meth:`set_default_locators_and_formatters`
And optionally:
- :meth:`limit_range_for_scale`
"""
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` object
associated with this scale.
"""
raise NotImplementedError()
def set_default_locators_and_formatters(self, axis):
"""
Set the :class:`~matplotlib.ticker.Locator` and
:class:`~matplotlib.ticker.Formatter` objects on the given
axis to match this scale.
"""
raise NotImplementedError()
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Returns the range *vmin*, *vmax*, possibly limited to the
domain supported by this scale.
*minpos* should be the minimum positive value in the data.
This is used by log scales to determine a minimum value.
"""
return vmin, vmax
class LinearScale(ScaleBase):
"""
The default linear scale.
"""
name = 'linear'
def __init__(self, axis, **kwargs):
pass
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to reasonable defaults for
linear scaling.
"""
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
axis.set_minor_locator(NullLocator())
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
The transform for linear scaling is just the
:class:`~matplotlib.transforms.IdentityTransform`.
"""
return IdentityTransform()
class LogTransformBase(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, nonpos):
Transform.__init__(self)
if nonpos == 'mask':
self._fill_value = np.nan
else:
self._fill_value = 1e-300
def transform_non_affine(self, a):
with np.errstate(invalid="ignore"):
a = np.where(a <= 0, self._fill_value, a)
return np.divide(np.log(a, out=a), np.log(self.base), out=a)
class InvertedLogTransformBase(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def transform_non_affine(self, a):
return ma.power(self.base, a)
class Log10Transform(LogTransformBase):
base = 10.0
def inverted(self):
return InvertedLog10Transform()
class InvertedLog10Transform(InvertedLogTransformBase):
base = 10.0
def inverted(self):
return Log10Transform()
class Log2Transform(LogTransformBase):
base = 2.0
def inverted(self):
return InvertedLog2Transform()
class InvertedLog2Transform(InvertedLogTransformBase):
base = 2.0
def inverted(self):
return Log2Transform()
class NaturalLogTransform(LogTransformBase):
base = np.e
def inverted(self):
return InvertedNaturalLogTransform()
class InvertedNaturalLogTransform(InvertedLogTransformBase):
base = np.e
def inverted(self):
return NaturalLogTransform()
class LogTransform(LogTransformBase):
def __init__(self, base, nonpos):
LogTransformBase.__init__(self, nonpos)
self.base = base
def inverted(self):
return InvertedLogTransform(self.base)
class InvertedLogTransform(InvertedLogTransformBase):
def __init__(self, base):
InvertedLogTransformBase.__init__(self)
self.base = base
def inverted(self):
return LogTransform(self.base)
class LogScale(ScaleBase):
"""
A standard logarithmic scale. Care is taken so non-positive
values are not plotted.
For computational efficiency (to push as much as possible to Numpy
C code in the common cases), this scale provides different
transforms depending on the base of the logarithm:
- base 10 (:class:`Log10Transform`)
- base 2 (:class:`Log2Transform`)
- base e (:class:`NaturalLogTransform`)
- arbitrary base (:class:`LogTransform`)
"""
name = 'log'
# compatibility shim
LogTransformBase = LogTransformBase
Log10Transform = Log10Transform
InvertedLog10Transform = InvertedLog10Transform
Log2Transform = Log2Transform
InvertedLog2Transform = InvertedLog2Transform
NaturalLogTransform = NaturalLogTransform
InvertedNaturalLogTransform = InvertedNaturalLogTransform
LogTransform = LogTransform
InvertedLogTransform = InvertedLogTransform
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*nonposx*/*nonposy*: ['mask' | 'clip' ]
non-positive values in *x* or *y* can be masked as
invalid, or clipped to a very small positive number
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[2, 3, 4, 5, 6, 7, 8, 9]``
will place 8 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
subs = kwargs.pop('subsx', None)
nonpos = kwargs.pop('nonposx', 'mask')
else:
base = kwargs.pop('basey', 10.0)
subs = kwargs.pop('subsy', None)
nonpos = kwargs.pop('nonposy', 'mask')
if nonpos not in ['mask', 'clip']:
raise ValueError("nonposx, nonposy kwarg must be 'mask' or 'clip'")
if base == 10.0:
self._transform = self.Log10Transform(nonpos)
elif base == 2.0:
self._transform = self.Log2Transform(nonpos)
elif base == np.e:
self._transform = self.NaturalLogTransform(nonpos)
else:
self._transform = self.LogTransform(base, nonpos)
self.base = base
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
log scaling.
"""
axis.set_major_locator(LogLocator(self.base))
axis.set_major_formatter(LogFormatterSciNotation(self.base))
axis.set_minor_locator(LogLocator(self.base, self.subs))
axis.set_minor_formatter(
LogFormatterSciNotation(self.base,
labelOnlyBase=(self.subs is not None)))
def get_transform(self):
"""
Return a :class:`~matplotlib.transforms.Transform` instance
appropriate for the given logarithm base.
"""
return self._transform
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Limit the domain to positive values.
"""
if not np.isfinite(minpos):
minpos = 1e-300 # This value should rarely if ever
# end up with a visible effect.
return (minpos if vmin <= 0 else vmin,
minpos if vmax <= 0 else vmax)
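# Usage sketch (illustrative only, with the keyword names accepted by this
# matplotlib version):
#   ax.set_xscale('log', basex=2, nonposx='clip')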
class SymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, base, linthresh, linscale):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self.linscale = linscale
self._linscale_adj = (linscale / (1.0 - self.base ** -1))
self._log_base = np.log(base)
def transform_non_affine(self, a):
sign = np.sign(a)
masked = ma.masked_inside(a,
-self.linthresh,
self.linthresh,
copy=False)
log = sign * self.linthresh * (
self._linscale_adj +
ma.log(np.abs(masked) / self.linthresh) / self._log_base)
if masked.mask.any():
return ma.where(masked.mask, a * self._linscale_adj, log)
else:
return log
def inverted(self):
return InvertedSymmetricalLogTransform(self.base, self.linthresh,
self.linscale)
class InvertedSymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, base, linthresh, linscale):
Transform.__init__(self)
symlog = SymmetricalLogTransform(base, linthresh, linscale)
self.base = base
self.linthresh = linthresh
self.invlinthresh = symlog.transform(linthresh)
self.linscale = linscale
self._linscale_adj = (linscale / (1.0 - self.base ** -1))
def transform_non_affine(self, a):
sign = np.sign(a)
masked = ma.masked_inside(a, -self.invlinthresh,
self.invlinthresh, copy=False)
exp = sign * self.linthresh * (
ma.power(self.base, (sign * (masked / self.linthresh))
- self._linscale_adj))
if masked.mask.any():
return ma.where(masked.mask, a / self._linscale_adj, exp)
else:
return exp
def inverted(self):
return SymmetricalLogTransform(self.base,
self.linthresh, self.linscale)
class SymmetricalLogScale(ScaleBase):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
Since the values close to zero tend toward infinity, there is a
need to have a range around zero that is linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
"""
name = 'symlog'
# compatibility shim
SymmetricalLogTransform = SymmetricalLogTransform
InvertedSymmetricalLogTransform = InvertedSymmetricalLogTransform
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*linthreshx*/*linthreshy*:
The range (-*x*, *x*) within which the plot is linear (to
avoid having the plot go to infinity around zero).
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[2, 3, 4, 5, 6, 7, 8, 9]``
will place 8 logarithmically spaced minor ticks between
each major tick.
*linscalex*/*linscaley*:
This allows the linear range (-*linthresh* to *linthresh*)
to be stretched relative to the logarithmic range. Its
value is the number of decades to use for each half of the
linear range. For example, when *linscale* == 1.0 (the
default), the space used for the positive and negative
halves of the linear range will be equal to one decade in
the logarithmic range.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
linthresh = kwargs.pop('linthreshx', 2.0)
subs = kwargs.pop('subsx', None)
linscale = kwargs.pop('linscalex', 1.0)
else:
base = kwargs.pop('basey', 10.0)
linthresh = kwargs.pop('linthreshy', 2.0)
subs = kwargs.pop('subsy', None)
linscale = kwargs.pop('linscaley', 1.0)
if base <= 1.0:
raise ValueError("'basex/basey' must be larger than 1")
if linthresh <= 0.0:
raise ValueError("'linthreshx/linthreshy' must be positive")
if linscale <= 0.0:
raise ValueError("'linscalex/linthreshy' must be positive")
self._transform = self.SymmetricalLogTransform(base,
linthresh,
linscale)
self.base = base
self.linthresh = linthresh
self.linscale = linscale
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
symmetrical log scaling.
"""
axis.set_major_locator(SymmetricalLogLocator(self.get_transform()))
axis.set_major_formatter(LogFormatterSciNotation(self.base))
axis.set_minor_locator(SymmetricalLogLocator(self.get_transform(),
self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`SymmetricalLogTransform` instance.
"""
return self._transform
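# Hedged sketch (editor's illustration, not part of the original module): the symlog
# forward/inverse transforms defined above should round-trip sample values, handling
# |v| <= linthresh linearly and everything outside logarithmically.
def _symlog_roundtrip_demo(base=10.0, linthresh=2.0, linscale=1.0):
    fwd = SymmetricalLogTransform(base, linthresh, linscale)
    inv = fwd.inverted()
    vals = np.array([-100.0, -2.0, -0.5, 0.5, 2.0, 100.0])
    out = fwd.transform_non_affine(vals)
    back = inv.transform_non_affine(np.asarray(out))
    return np.allclose(np.asarray(back), vals)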
class LogitTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, nonpos):
Transform.__init__(self)
if nonpos == 'mask':
self._fill_value = np.nan
else:
self._fill_value = 1e-300
self._nonpos = nonpos
def transform_non_affine(self, a):
"""logit transform (base 10), masked or clipped"""
with np.errstate(invalid="ignore"):
a = np.select(
[a <= 0, a >= 1], [self._fill_value, 1 - self._fill_value], a)
return np.log10(a / (1 - a))
def inverted(self):
return LogisticTransform(self._nonpos)
class LogisticTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, nonpos='mask'):
Transform.__init__(self)
self._nonpos = nonpos
def transform_non_affine(self, a):
"""logistic transform (base 10)"""
return 1.0 / (1 + 10**(-a))
def inverted(self):
return LogitTransform(self._nonpos)
class LogitScale(ScaleBase):
"""
Logit scale for data between zero and one, both excluded.
This scale is similar to a log scale close to zero and to one, and almost
linear around 0.5. It maps the interval ]0, 1[ onto ]-infty, +infty[.
"""
name = 'logit'
def __init__(self, axis, nonpos='mask'):
"""
*nonpos*: ['mask' | 'clip' ]
values beyond ]0, 1[ can be masked as invalid, or clipped to a number
very close to 0 or 1
"""
if nonpos not in ['mask', 'clip']:
raise ValueError("nonposx, nonposy kwarg must be 'mask' or 'clip'")
self._transform = LogitTransform(nonpos)
def get_transform(self):
"""
Return a :class:`LogitTransform` instance.
"""
return self._transform
def set_default_locators_and_formatters(self, axis):
# ..., 0.01, 0.1, 0.5, 0.9, 0.99, ...
axis.set_major_locator(LogitLocator())
axis.set_major_formatter(LogitFormatter())
axis.set_minor_locator(LogitLocator(minor=True))
axis.set_minor_formatter(LogitFormatter())
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Limit the domain to values between 0 and 1 (excluded).
"""
if not np.isfinite(minpos):
minpos = 1e-7 # This value should rarely if ever
# end up with a visible effect.
return (minpos if vmin <= 0 else vmin,
1 - minpos if vmax >= 1 else vmax)
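# Hedged sketch (editor's illustration, not part of the original module): LogitTransform
# (p -> log10(p / (1 - p))) and LogisticTransform (x -> 1 / (1 + 10**-x)) defined above
# are inverses of each other on the open interval (0, 1).
def _logit_roundtrip_demo():
    fwd = LogitTransform('mask')
    inv = fwd.inverted()
    p = np.array([0.01, 0.1, 0.5, 0.9, 0.99])
    return np.allclose(inv.transform_non_affine(fwd.transform_non_affine(p)), p)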
_scale_mapping = {
'linear': LinearScale,
'log': LogScale,
'symlog': SymmetricalLogScale,
'logit': LogitScale,
}
def get_scale_names():
names = list(six.iterkeys(_scale_mapping))
names.sort()
return names
def scale_factory(scale, axis, **kwargs):
"""
Return a scale class by name.
ACCEPTS: [ %(names)s ]
"""
    if scale is None:
        scale = 'linear'
    scale = scale.lower()
if scale not in _scale_mapping:
raise ValueError("Unknown scale type '%s'" % scale)
return _scale_mapping[scale](axis, **kwargs)
scale_factory.__doc__ = dedent(scale_factory.__doc__) % \
{'names': " | ".join(get_scale_names())}
def register_scale(scale_class):
"""
Register a new kind of scale.
*scale_class* must be a subclass of :class:`ScaleBase`.
"""
_scale_mapping[scale_class.name] = scale_class
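# Hedged usage sketch (editor's illustration; MyScale is hypothetical): a custom scale
# is a ScaleBase subclass with a unique ``name`` attribute, registered once at import
# time, after which axes can select it by name:
#   class MyScale(LinearScale):
#       name = 'myscale'
#   register_scale(MyScale)
#   ax.set_xscale('myscale')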
def get_scale_docs():
"""
Helper function for generating docstrings related to scales.
"""
docs = []
for name in get_scale_names():
scale_class = _scale_mapping[name]
docs.append(" '%s'" % name)
docs.append("")
class_docs = dedent(scale_class.__init__.__doc__)
class_docs = "".join([" %s\n" %
x for x in class_docs.split("\n")])
docs.append(class_docs)
docs.append("")
return "\n".join(docs)
docstring.interpd.update(
scale=' | '.join([repr(x) for x in get_scale_names()]),
scale_docs=get_scale_docs().rstrip(),
)
|
gpl-3.0
|
DBernardes/ProjetoECC
|
Ganho/Codigo/CCDinfo.py
|
1
|
1159
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Created on October 18, 2016
Description: this module takes as input the header of a FITS image and the number of images in the acquired series, returning a string with the main CCD information.
@author: Denis Varise Bernardes & Eder Martioli
Laboratorio Nacional de Astrofisica, Brazil.
"""
__version__ = "1.0"
__copyright__ = """
Copyright (c) ... All rights reserved.
"""
import matplotlib.pyplot as plt
def CCDinfo(header, nImagesFlat, nImagesBias):
date = header['date'].split('T')
plt.xticks(())
plt.yticks(())
    text = ('Camera: ' + header['head'] + '\n'
            + 'Data do experimento: %s %s ' % (date[0], date[1]) + '\n'
            + 'Quantidade de imagens: %i imagens de Flat e %i imagens de bias' % (nImagesFlat, nImagesBias) + '\n'
            + 'Modo de Leitura: %s' % (header['ACQMODE']) + '\n'
            + 'Taxa de leitura: %.2f MHz' % (1/(header['readtime']*1000000)) + '\n'
            + 'Pre-amplificacao: %i' % (header['preamp']) + '\n'
            + 'VShift Speed: %.3f e-6' % (header['vshift']*1000000))
return text
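# Hedged usage sketch (editor's illustration; the file name and image counts are
# hypothetical):
#   import pyfits
#   header = pyfits.getheader('flat_001.fits')
#   print(CCDinfo(header, nImagesFlat=10, nImagesBias=10))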
|
mit
|
gakarak/FCN_MSCOCO_Food_Segmentation
|
MSCOCO_Processing/PythonAPI/run10_common_onimage.py
|
1
|
15089
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import glob
import os
import sys
import time
import numpy as np
import json
import matplotlib.pyplot as plt
try:
import cPickle as pickle
except:
import pickle
import skimage.io as skio
import skimage.transform as sktf
import skimage.color as skolor
import pandas as pd
import matplotlib.pyplot as plt
import keras
from keras import backend as K
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Convolution2D, Activation, MaxPooling2D,\
Flatten, BatchNormalization, InputLayer, Dropout, Reshape, Permute, Input, UpSampling2D, Lambda
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.utils.visualize_util import plot as kplot
import tensorflow as tf
##############################################
def split_list_by_blocks(lst, psiz):
tret = [lst[x:x + psiz] for x in range(0, len(lst), psiz)]
return tret
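# Hedged example (editor's illustration) for the helper above:
#   split_list_by_blocks([0, 1, 2, 3, 4], 2) -> [[0, 1], [2, 3], [4]]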
##############################################
dictFoods = {
# 'banana' : 52,
# 'apple' : 53,
'sandwich' : 54,
# 'orange' : 55,
# 'broccoli' : 56,
# 'carrot' : 57,
'hot dog' : 58,
'pizza' : 59,
# 'donut' : 60,
'cake' : 61
}
reversedDirFoods = {vv:kk for kk,vv in dictFoods.items()}
listSortedFoodNames=[
'pizza',
'cake',
'sandwich',
'hot dog',
# 'donut',
# 'banana',
# 'apple',
# 'orange',
# 'broccoli',
# 'carrot'
]
listSortedFoodIds = [dictFoods[xx] for xx in listSortedFoodNames]
dictCOCO2Index = {dictFoods[xx]:(ii+1) for ii,xx in enumerate(listSortedFoodNames)}
######################################################
class BatcherOnImageCOCO:
pathDataIdx=None
pathMeanData=None
meanPrefix='mean.pkl'
arrPathDataImg=None
arrPathDataMsk=None
wdir=None
dataImg = None
dataMsk = None
dataMskCls = None
meanData = None
#
imgScale = 1.
modelPrefix = None
#
isTheanoShape=True
isRemoveMeanImage=False
isDataInMemory=False
shapeImg = None
numCh = 1
numImg = -1
numCls = -1
def __init__(self, pathDataIdx, pathMeanData=None, isRecalculateMeanIfExist=False,
isTheanoShape=True,
isRemoveMeanImage=False,
isLoadIntoMemory=False):
self.isTheanoShape=isTheanoShape
self.isRemoveMeanImage=isRemoveMeanImage
# (1) Check input Image
if not os.path.isfile(pathDataIdx):
raise Exception('Cant find input Image file [%s]' % pathDataIdx)
self.pathDataIdx = os.path.abspath(pathDataIdx)
self.wdir = os.path.dirname(self.pathDataIdx)
tdata = pd.read_csv(self.pathDataIdx, header=None)
# (2) Check input Image Mask
self.arrPathDataImg = np.array([os.path.join(self.wdir, xx) for xx in tdata[0]])
self.arrPathDataMsk = np.array(['%s-mskfood-idx.png' % xx for xx in self.arrPathDataImg])
# (3) Load Image and Mask
tpathImg = self.arrPathDataImg[0]
tpathMsk = self.arrPathDataMsk[0]
if not os.path.isfile(tpathImg):
raise Exception('Cant find CT Image file [%s]' % tpathImg)
if not os.path.isfile(tpathMsk):
raise Exception('Cant find CT Image Mask file [%s]' % tpathMsk)
tdataImg = skio.imread(tpathImg)
tdataMsk = skio.imread(tpathMsk)
tdataImg = self.adjustImage(self.transformImageFromOriginal(tdataImg))
tdataMsk = self.transformImageFromOriginal(tdataMsk)
self.numCls = len(listSortedFoodNames)+1
tdataMskCls = self.convertMskToOneHot(tdataMsk)
self.shapeImg = tdataImg.shape
self.shapeMsk = tdataMskCls.shape
# (5) Load data into memory
self.numImg = len(self.arrPathDataImg)
if isLoadIntoMemory:
self.isDataInMemory = True
self.dataImg = np.zeros([self.numImg] + list(self.shapeImg), dtype=np.float)
self.dataMsk = np.zeros([self.numImg] + list(self.shapeImg), dtype=np.float)
self.dataMskCls = np.zeros([self.numImg] + list(self.shapeMsk), dtype=np.float)
print (':: Loading data into memory:')
for ii in range(self.numImg):
tpathImg = self.arrPathDataImg[ii]
tpathMsk = self.arrPathDataMsk[ii]
#
tdataImg = self.adjustImage(skio.imread(tpathImg))
tdataMsk = skio.imread(tpathMsk)
tdataImg = self.transformImageFromOriginal(tdataImg)
tdataMsk = self.transformImageFromOriginal(tdataMsk)
tdataMskCls = self.convertMskToOneHot(tdataMsk)
self.dataImg[ii] = tdataImg
self.dataMsk[ii] = tdataMsk
self.dataMskCls[ii] = tdataMskCls
if (ii % 10) == 0:
print ('\t[%d/%d] ...' % (ii, self.numImg))
print ('\t... [done]')
if self.isTheanoShape:
tshp = self.dataMskCls.shape
print (tshp)
else:
self.isDataInMemory = False
self.dataImg = None
self.dataMsk = None
self.dataMskCls = None
def getNumImg(self):
if self.isInitialized():
return self.numImg
else:
return 0
def adjustImage(self, pimg):
tret = (1./255.)*pimg.astype(np.float) - 0.5
return tret
def convertMskToOneHot(self, msk):
tshape = list(msk.shape)
if self.numCls>2:
tret = np_utils.to_categorical(msk.reshape(-1), self.numCls)
else:
tret = (msk.reshape(-1)>0).astype(np.float)
tret = np.vstack((1.-tret,tret)).transpose()
if self.isTheanoShape:
tmpShape = list(tshape[1:]) + [self.numCls]
# tshape[ 0] = self.numCls
else:
tmpShape = tshape
tmpShape[-1] = self.numCls
tret = tret.reshape(tmpShape)
if self.isTheanoShape:
#FIXME: work only for 2D!!! (not 3D)
tret = tret.transpose((2,0,1))
return tret
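    # Editor's note on the conversion above: for a mask prepared by
    # transformImageFromOriginal(), the one-hot result has shape (numCls, H, W) when
    # isTheanoShape=True (channels first) and (H, W, numCls) otherwise.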
def isInitialized(self):
return (self.shapeImg is not None) and (self.shapeMsk is not None) and (self.wdir is not None) and (self.numCls>0)
def checkIsInitialized(self):
if not self.isInitialized():
raise Exception('class Batcher() is not correctly initialized')
def toString(self):
if self.isInitialized():
if self.meanData is not None:
tstr = 'Shape=%s, #Samples=%d, #Labels=%d, meanValuePerCh=%s' % (self.shapeImg, self.numImg, self.numCls, self.meanData['meanCh'])
else:
tstr = 'Shape=%s, #Samples=%d, #Labels=%d, meanValuePerCh= is Not Calculated' % (self.shapeImg, self.numImg, self.numCls)
else:
tstr = "BatcherOnImage2D() is not initialized"
return tstr
def __str__(self):
return self.toString()
def __repr__(self):
return self.toString()
def preprocImageShape(self, img):
if self.isTheanoShape:
if img.ndim==2:
return img.reshape([1] + list(img.shape))
else:
return img.transpose((2,0,1))
else:
if img.ndim==2:
return img.reshape(list(img.shape) + [1])
else:
return img
def transformImageFromOriginal(self, pimg):
tmp = self.preprocImageShape(pimg)
return tmp.astype(np.float)
def getBatchDataByIdx(self, parBatchIdx):
rndIdx = parBatchIdx
parBatchSize = len(rndIdx)
dataX = np.zeros([parBatchSize] + list(self.shapeImg), dtype=np.float)
dataY = np.zeros([parBatchSize] + list(self.shapeMsk), dtype=np.float)
for ii, tidx in enumerate(rndIdx):
if self.isDataInMemory:
dataX[ii] = self.dataImg[tidx]
dataY[ii] = self.dataMskCls[tidx]
else:
tpathImg = self.arrPathDataImg[tidx]
tpathMsk = self.arrPathDataMsk[tidx]
tdataImg = self.adjustImage(skio.imread(tpathImg))
tdataMsk = skio.imread(tpathMsk)
tdataImg = self.transformImageFromOriginal(tdataImg)
tdataMsk = self.transformImageFromOriginal(tdataMsk)
tdataMskCls = self.convertMskToOneHot(tdataMsk)
dataX[ii] = tdataImg
dataY[ii] = tdataMskCls
if self.isTheanoShape:
tshp = dataY.shape
dataY = dataY.reshape([tshp[0], tshp[1], np.prod(tshp[-2:])]).transpose((0, 2, 1))
# print (tshp)
return (dataX, dataY)
def getBatchData(self, parBatchSize=8):
self.checkIsInitialized()
numImg = self.numImg
rndIdx = np.random.permutation(range(numImg))[:parBatchSize]
return self.getBatchDataByIdx(rndIdx)
def exportModel(self, model, epochId, extInfo=None):
if extInfo is not None:
modelPrefix = extInfo
else:
modelPrefix = ''
foutModel = "%s-e%03d.json" % (modelPrefix, epochId)
foutWeights = "%s-e%03d.h5" % (modelPrefix, epochId)
foutModel = '%s-%s' % (self.pathDataIdx, foutModel)
foutWeights = '%s-%s' % (self.pathDataIdx, foutWeights)
with open(foutModel, 'w') as f:
str = json.dumps(json.loads(model.to_json()), indent=3)
f.write(str)
model.save_weights(foutWeights, overwrite=True)
return foutModel
def buildModel_TF(self, targetImageShaped=None):
        if self.isInitialized():
            retModel = buildModelOnImageCT_TF(inpShape=self.shapeImg, numCls=self.numCls)
            print ('>>> BatcherOnImageCOCO::buildModel_TF() with input shape: %s' % list(retModel[0].input_shape) )
            return retModel
        else:
            raise Exception('*** BatcherOnImageCOCO is not initialized ***')
@staticmethod
def loadModelFromJson(pathModelJson):
if not os.path.isfile(pathModelJson):
raise Exception('Cant find JSON-file [%s]' % pathModelJson)
tpathBase = os.path.splitext(pathModelJson)[0]
tpathModelWeights = '%s.h5' % tpathBase
if not os.path.isfile(tpathModelWeights):
raise Exception('Cant find h5-Weights-file [%s]' % tpathModelWeights)
with open(pathModelJson, 'r') as f:
tmpStr = f.read()
model = keras.models.model_from_json(tmpStr)
model.load_weights(tpathModelWeights)
return model
@staticmethod
def loadModelFromDir(pathDirWithModels, paramFilter=None):
if paramFilter is None:
lstModels = glob.glob('%s/*.json' % pathDirWithModels)
else:
lstModels = glob.glob('%s/*%s*.json' % (pathDirWithModels, paramFilter))
pathJson = os.path.abspath(sorted(lstModels)[-1])
print (':: found model [%s] in directory [%s]' % (os.path.basename(pathJson), pathDirWithModels))
return BatcherOnImageCOCO.loadModelFromJson(pathJson)
######################################################
def buildModelOnImage_COCO(inpShape=(128, 128, 64, 1), numCls=2, sizFlt=3, isTheanoFrmwk=True):
dataInput = Input(shape=inpShape)
# -------- Encoder --------
# Conv1
x = Convolution2D(nb_filter=16, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(dataInput)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv2
x = Convolution2D(nb_filter=32, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
x = Convolution2D(nb_filter=32, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv3
x = Convolution2D(nb_filter=64, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
x = Convolution2D(nb_filter=64, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv4
x = Convolution2D(nb_filter=128, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
x = Convolution2D(nb_filter=128, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv5
# x = Convolution3D(nb_filter=256, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
# x = MaxPooling3D(pool_size=(2, 2))(x)
# Conv6
# x = Convolution3D(nb_filter=256, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
# x = MaxPooling3D(pool_size=(2, 2))(x)
# -------- Decoder --------
# UpConv #1
x = Convolution2D(nb_filter=128, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
x = Convolution2D(nb_filter=128, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
x = UpSampling2D(size=(2, 2))(x)
# UpConv #2
x = Convolution2D(nb_filter=64, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
x = Convolution2D(nb_filter=64, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
x = UpSampling2D(size=(2, 2))(x)
# UpConv #3
x = Convolution2D(nb_filter=32, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
x = Convolution2D(nb_filter=32, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
x = UpSampling2D(size=(2, 2))(x)
# UpConv #4
x = Convolution2D(nb_filter=32, nb_col=sizFlt, nb_row=sizFlt, border_mode='same', activation='relu')(x)
x = UpSampling2D(size=(2, 2))(x)
# 1x1 Convolution: emulation of Dense layer
x = Convolution2D(nb_filter=numCls, nb_col=1, nb_row=1, border_mode='same', activation='linear')(x)
# -------- Finalize --------
#
if isTheanoFrmwk:
tmpModel = Model(dataInput, x)
tmpShape = tmpModel.output_shape[-2:]
sizeReshape = np.prod(tmpShape)
x = Reshape([numCls, sizeReshape])(x)
x = Permute((2, 1))(x)
x = Activation('softmax')(x)
retModel = Model(dataInput, x)
else:
x = Lambda(lambda XX: tf.nn.softmax(XX))(x)
retModel = Model(dataInput, x)
retShape = retModel.output_shape[1:-1]
return (retModel, retShape)
######################################################
if __name__=='__main__':
fidxTrn = '/mnt/data1T2/datasets2/mscoco/raw-data/train2014-food2-128x128/idx.txt'
fidxVal = '/mnt/data1T2/datasets2/mscoco/raw-data/val2014-food2-128x128/idx.txt'
batcherTrn = BatcherOnImageCOCO(pathDataIdx=fidxTrn, isTheanoShape=True)
batcherVal = BatcherOnImageCOCO(pathDataIdx=fidxVal, isTheanoShape=True)
print ('Train : %s' % batcherTrn)
print ('Validation : %s' % batcherVal)
#
dataX, dataY = batcherTrn.getBatchData()
print ('dataX.shape = %s, dataY.shape = %s' % (list(dataX.shape), list(dataY.shape)))
#
model,_ = buildModelOnImage_COCO(inpShape=batcherTrn.shapeImg, numCls=batcherTrn.numCls, isTheanoFrmwk=True)
model.summary()
|
apache-2.0
|
walterreade/scikit-learn
|
examples/classification/plot_lda.py
|
142
|
2419
|
"""
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="Linear Discriminant Analysis with shrinkage", color='navy')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="Linear Discriminant Analysis", color='gold')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
|
bsd-3-clause
|
harshaneelhg/scikit-learn
|
examples/linear_model/plot_logistic_path.py
|
349
|
1195
|
#!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes the regularization path on the IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
nmartensen/pandas
|
asv_bench/benchmarks/timeseries.py
|
3
|
16150
|
try:
from pandas.plotting._converter import DatetimeConverter
except ImportError:
from pandas.tseries.converter import DatetimeConverter
from .pandas_vb_common import *
import pandas as pd
import datetime as dt
try:
import pandas.tseries.holiday
except ImportError:
pass
from pandas.tseries.frequencies import infer_freq
import numpy as np
if hasattr(Series, 'convert'):
Series.resample = Series.convert
class DatetimeIndex(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
self.delta_offset = pd.offsets.Day()
self.fast_offset = pd.offsets.DateOffset(months=2, days=2)
self.slow_offset = pd.offsets.BusinessDay()
self.rng2 = date_range(start='1/1/2000 9:30', periods=10000, freq='S', tz='US/Eastern')
self.index_repeated = date_range(start='1/1/2000', periods=1000, freq='T').repeat(10)
self.rng3 = date_range(start='1/1/2000', periods=1000, freq='H')
self.df = DataFrame(np.random.randn(len(self.rng3), 2), self.rng3)
self.rng4 = date_range(start='1/1/2000', periods=1000, freq='H', tz='US/Eastern')
self.df2 = DataFrame(np.random.randn(len(self.rng4), 2), index=self.rng4)
N = 100000
self.dti = pd.date_range('2011-01-01', freq='H', periods=N).repeat(5)
self.dti_tz = pd.date_range('2011-01-01', freq='H', periods=N,
tz='Asia/Tokyo').repeat(5)
self.rng5 = date_range(start='1/1/2000', end='3/1/2000', tz='US/Eastern')
self.dst_rng = date_range(start='10/29/2000 1:00:00', end='10/29/2000 1:59:59', freq='S')
self.index = date_range(start='10/29/2000', end='10/29/2000 00:59:59', freq='S')
self.index = self.index.append(self.dst_rng)
self.index = self.index.append(self.dst_rng)
self.index = self.index.append(date_range(start='10/29/2000 2:00:00', end='10/29/2000 3:00:00', freq='S'))
self.N = 10000
self.rng6 = date_range(start='1/1/1', periods=self.N, freq='B')
self.rng7 = date_range(start='1/1/1700', freq='D', periods=100000)
self.no_freq = self.rng7[:50000].append(self.rng7[50002:])
self.d_freq = self.rng7[:50000].append(self.rng7[50000:])
self.rng8 = date_range(start='1/1/1700', freq='B', periods=100000)
self.b_freq = self.rng8[:50000].append(self.rng8[50000:])
def time_add_timedelta(self):
(self.rng + dt.timedelta(minutes=2))
def time_add_offset_delta(self):
(self.rng + self.delta_offset)
def time_add_offset_fast(self):
(self.rng + self.fast_offset)
def time_add_offset_slow(self):
(self.rng + self.slow_offset)
def time_normalize(self):
self.rng2.normalize()
def time_unique(self):
self.index_repeated.unique()
def time_reset_index(self):
self.df.reset_index()
def time_reset_index_tz(self):
self.df2.reset_index()
def time_dti_factorize(self):
self.dti.factorize()
def time_dti_tz_factorize(self):
self.dti_tz.factorize()
def time_timestamp_tzinfo_cons(self):
self.rng5[0]
def time_infer_dst(self):
self.index.tz_localize('US/Eastern', infer_dst=True)
def time_timeseries_is_month_start(self):
self.rng6.is_month_start
def time_infer_freq_none(self):
infer_freq(self.no_freq)
def time_infer_freq_daily(self):
infer_freq(self.d_freq)
def time_infer_freq_business(self):
infer_freq(self.b_freq)
class TimeDatetimeConverter(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
def time_convert(self):
DatetimeConverter.convert(self.rng, None, None)
class Iteration(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.M = 10000
self.idx1 = date_range(start='20140101', freq='T', periods=self.N)
self.idx2 = period_range(start='20140101', freq='T', periods=self.N)
def iter_n(self, iterable, n=None):
self.i = 0
for _ in iterable:
self.i += 1
if ((n is not None) and (self.i > n)):
break
def time_iter_datetimeindex(self):
self.iter_n(self.idx1)
def time_iter_datetimeindex_preexit(self):
self.iter_n(self.idx1, self.M)
def time_iter_periodindex(self):
self.iter_n(self.idx2)
def time_iter_periodindex_preexit(self):
self.iter_n(self.idx2, self.M)
#----------------------------------------------------------------------
# Resampling
class ResampleDataFrame(object):
goal_time = 0.2
def setup(self):
self.rng = date_range(start='20130101', periods=100000, freq='50L')
self.df = DataFrame(np.random.randn(100000, 2), index=self.rng)
def time_max_numpy(self):
self.df.resample('1s', how=np.max)
def time_max_string(self):
self.df.resample('1s', how='max')
def time_mean_numpy(self):
self.df.resample('1s', how=np.mean)
def time_mean_string(self):
self.df.resample('1s', how='mean')
def time_min_numpy(self):
self.df.resample('1s', how=np.min)
def time_min_string(self):
self.df.resample('1s', how='min')
class ResampleSeries(object):
goal_time = 0.2
def setup(self):
self.rng1 = period_range(start='1/1/2000', end='1/1/2001', freq='T')
self.ts1 = Series(np.random.randn(len(self.rng1)), index=self.rng1)
self.rng2 = date_range(start='1/1/2000', end='1/1/2001', freq='T')
self.ts2 = Series(np.random.randn(len(self.rng2)), index=self.rng2)
self.rng3 = date_range(start='2000-01-01 00:00:00', end='2000-01-01 10:00:00', freq='555000U')
self.int_ts = Series(5, self.rng3, dtype='int64')
self.dt_ts = self.int_ts.astype('datetime64[ns]')
def time_period_downsample_mean(self):
self.ts1.resample('D', how='mean')
def time_timestamp_downsample_mean(self):
self.ts2.resample('D', how='mean')
def time_resample_datetime64(self):
# GH 7754
self.dt_ts.resample('1S', how='last')
def time_1min_5min_mean(self):
self.ts2[:10000].resample('5min', how='mean')
def time_1min_5min_ohlc(self):
self.ts2[:10000].resample('5min', how='ohlc')
class AsOf(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s')
self.ts = Series(np.random.randn(self.N), index=self.rng)
self.dates = date_range(start='1/1/1990', periods=(self.N * 10), freq='5s')
self.ts2 = self.ts.copy()
self.ts2[250:5000] = np.nan
self.ts3 = self.ts.copy()
self.ts3[-5000:] = np.nan
# test speed of pre-computing NAs.
def time_asof(self):
self.ts.asof(self.dates)
# should be roughly the same as above.
def time_asof_nan(self):
self.ts2.asof(self.dates)
# test speed of the code path for a scalar index
# without *while* loop
def time_asof_single(self):
self.ts.asof(self.dates[0])
# test speed of the code path for a scalar index
# before the start. should be the same as above.
def time_asof_single_early(self):
self.ts.asof(self.dates[0] - dt.timedelta(10))
# test the speed of the code path for a scalar index
# with a long *while* loop. should still be much
# faster than pre-computing all the NAs.
def time_asof_nan_single(self):
self.ts3.asof(self.dates[-1])
class AsOfDataFrame(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.M = 100
self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s')
self.dates = date_range(start='1/1/1990', periods=(self.N * 10), freq='5s')
self.ts = DataFrame(np.random.randn(self.N, self.M), index=self.rng)
self.ts2 = self.ts.copy()
self.ts2.iloc[250:5000] = np.nan
self.ts3 = self.ts.copy()
self.ts3.iloc[-5000:] = np.nan
# test speed of pre-computing NAs.
def time_asof(self):
self.ts.asof(self.dates)
# should be roughly the same as above.
def time_asof_nan(self):
self.ts2.asof(self.dates)
# test speed of the code path for a scalar index
# with pre-computing all NAs.
def time_asof_single(self):
self.ts.asof(self.dates[0])
# should be roughly the same as above.
def time_asof_nan_single(self):
self.ts3.asof(self.dates[-1])
# test speed of the code path for a scalar index
# before the start. should be without the cost of
# pre-computing all the NAs.
def time_asof_single_early(self):
self.ts.asof(self.dates[0] - dt.timedelta(10))
class TimeSeries(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='s')
self.rng = self.rng.take(np.random.permutation(self.N))
self.ts = Series(np.random.randn(self.N), index=self.rng)
self.rng2 = date_range(start='1/1/2000', periods=self.N, freq='T')
self.ts2 = Series(np.random.randn(self.N), index=self.rng2)
self.lindex = np.random.permutation(self.N)[:(self.N // 2)]
self.rindex = np.random.permutation(self.N)[:(self.N // 2)]
self.left = Series(self.ts2.values.take(self.lindex), index=self.ts2.index.take(self.lindex))
self.right = Series(self.ts2.values.take(self.rindex), index=self.ts2.index.take(self.rindex))
self.rng3 = date_range(start='1/1/2000', periods=1500000, freq='S')
self.ts3 = Series(1, index=self.rng3)
def time_sort_index_monotonic(self):
self.ts2.sort_index()
def time_sort_index_non_monotonic(self):
self.ts.sort_index()
def time_timeseries_slice_minutely(self):
self.ts2[:10000]
def time_add_irregular(self):
(self.left + self.right)
def time_large_lookup_value(self):
self.ts3[self.ts3.index[(len(self.ts3) // 2)]]
self.ts3.index._cleanup()
class SeriesArithmetic(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.s = Series(date_range(start='20140101', freq='T', periods=self.N))
self.delta_offset = pd.offsets.Day()
self.fast_offset = pd.offsets.DateOffset(months=2, days=2)
self.slow_offset = pd.offsets.BusinessDay()
def time_add_offset_delta(self):
(self.s + self.delta_offset)
def time_add_offset_fast(self):
(self.s + self.fast_offset)
def time_add_offset_slow(self):
(self.s + self.slow_offset)
class ToDatetime(object):
goal_time = 0.2
def setup(self):
self.rng = date_range(start='1/1/2000', periods=10000, freq='D')
self.stringsD = Series((((self.rng.year * 10000) + (self.rng.month * 100)) + self.rng.day), dtype=np.int64).apply(str)
self.rng = date_range(start='1/1/2000', periods=20000, freq='H')
self.strings = [x.strftime('%Y-%m-%d %H:%M:%S') for x in self.rng]
self.strings_nosep = [x.strftime('%Y%m%d %H:%M:%S') for x in self.rng]
self.strings_tz_space = [x.strftime('%Y-%m-%d %H:%M:%S') + ' -0800'
for x in self.rng]
self.s = Series((['19MAY11', '19MAY11:00:00:00'] * 100000))
self.s2 = self.s.str.replace(':\\S+$', '')
def time_format_YYYYMMDD(self):
to_datetime(self.stringsD, format='%Y%m%d')
def time_iso8601(self):
to_datetime(self.strings)
def time_iso8601_nosep(self):
to_datetime(self.strings_nosep)
def time_iso8601_format(self):
to_datetime(self.strings, format='%Y-%m-%d %H:%M:%S')
def time_iso8601_format_no_sep(self):
to_datetime(self.strings_nosep, format='%Y%m%d %H:%M:%S')
def time_iso8601_tz_spaceformat(self):
to_datetime(self.strings_tz_space)
def time_format_exact(self):
to_datetime(self.s2, format='%d%b%y')
def time_format_no_exact(self):
to_datetime(self.s, format='%d%b%y', exact=False)
class Offsets(object):
goal_time = 0.2
def setup(self):
self.date = dt.datetime(2011, 1, 1)
self.dt64 = np.datetime64('2011-01-01 09:00Z')
self.hcal = pd.tseries.holiday.USFederalHolidayCalendar()
self.day = pd.offsets.Day()
self.year = pd.offsets.YearBegin()
self.cday = pd.offsets.CustomBusinessDay()
self.cmb = pd.offsets.CustomBusinessMonthBegin(calendar=self.hcal)
self.cme = pd.offsets.CustomBusinessMonthEnd(calendar=self.hcal)
self.cdayh = pd.offsets.CustomBusinessDay(calendar=self.hcal)
def time_timeseries_day_apply(self):
self.day.apply(self.date)
def time_timeseries_day_incr(self):
(self.date + self.day)
def time_timeseries_year_apply(self):
self.year.apply(self.date)
def time_timeseries_year_incr(self):
(self.date + self.year)
# custom business offsets
def time_custom_bday_decr(self):
(self.date - self.cday)
def time_custom_bday_incr(self):
(self.date + self.cday)
def time_custom_bday_apply(self):
self.cday.apply(self.date)
def time_custom_bday_apply_dt64(self):
self.cday.apply(self.dt64)
def time_custom_bday_cal_incr(self):
self.date + 1 * self.cdayh
def time_custom_bday_cal_decr(self):
self.date - 1 * self.cdayh
def time_custom_bday_cal_incr_n(self):
self.date + 10 * self.cdayh
def time_custom_bday_cal_incr_neg_n(self):
self.date - 10 * self.cdayh
# Increment custom business month
def time_custom_bmonthend_incr(self):
(self.date + self.cme)
def time_custom_bmonthend_incr_n(self):
(self.date + (10 * self.cme))
def time_custom_bmonthend_decr_n(self):
(self.date - (10 * self.cme))
def time_custom_bmonthbegin_decr_n(self):
(self.date - (10 * self.cmb))
def time_custom_bmonthbegin_incr_n(self):
(self.date + (10 * self.cmb))
class SemiMonthOffset(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
        # date is not on an offset, which will be the slowest case
self.date = dt.datetime(2011, 1, 2)
self.semi_month_end = pd.offsets.SemiMonthEnd()
self.semi_month_begin = pd.offsets.SemiMonthBegin()
def time_end_apply(self):
self.semi_month_end.apply(self.date)
def time_end_incr(self):
self.date + self.semi_month_end
def time_end_incr_n(self):
self.date + 10 * self.semi_month_end
def time_end_decr(self):
self.date - self.semi_month_end
def time_end_decr_n(self):
self.date - 10 * self.semi_month_end
def time_end_apply_index(self):
self.semi_month_end.apply_index(self.rng)
def time_end_incr_rng(self):
self.rng + self.semi_month_end
def time_end_decr_rng(self):
self.rng - self.semi_month_end
def time_begin_apply(self):
self.semi_month_begin.apply(self.date)
def time_begin_incr(self):
self.date + self.semi_month_begin
def time_begin_incr_n(self):
self.date + 10 * self.semi_month_begin
def time_begin_decr(self):
self.date - self.semi_month_begin
def time_begin_decr_n(self):
self.date - 10 * self.semi_month_begin
def time_begin_apply_index(self):
self.semi_month_begin.apply_index(self.rng)
def time_begin_incr_rng(self):
self.rng + self.semi_month_begin
def time_begin_decr_rng(self):
self.rng - self.semi_month_begin
class DatetimeAccessor(object):
def setup(self):
self.N = 100000
self.series = pd.Series(
pd.date_range(start='1/1/2000', periods=self.N, freq='T')
)
def time_dt_accessor(self):
self.series.dt
def time_dt_accessor_normalize(self):
self.series.dt.normalize()
|
bsd-3-clause
|
sniemi/SamPy
|
sandbox/testing/sphinx_test/STISCCDSpectroscopyFlat.py
|
1
|
58685
|
#! /usr/bin/env python
'''
ABOUT:
Creates STIS Spectroscopic pixel-to-pixel flat fields from RAW data.
Uses spline fits after dust motes, bad pixels, etc. have been masked
to fit and to remove the low frequency structure. The data are divided
by each fit to normalize the data before it is combined and co-added with
appropriate weighting of flux errors.
Both 50CCD and 52x2 data can be used. For 52x2 row fitting is not
done currently due to emission lines in the lamp spectrum.
For more information see STIS ISR 1999-06 by Bohlin and TIR 2010 by Niemi.
DEPENDS:
Python 2.5 or later (not 3.x compatible however)
NumPy
PyFITS
PyRAF
SciPy
matplotlib
TESTED:
Python 2.5.4
NumPy 1.4.0.dev7576
SciPy
PyFITS 2.2.2
PyRAF
matplotlib 1.0.svn
HISTORY:
Created on November 23, 2009
VERSION HISTORY:
0.1: testing version (SMN)
0.2: test release (SMN)
0.3: first full working version (SMN)
0.4: modified FITS output to comply with CDBS rules (SMN)
0.5: improved documentation (SMN)
@author: Sami-Matias Niemi
@version: 0.5
@organization: STScI
@contact: [email protected]
@requires: NumPy
@requires: SciPy
@requires: PyFITS
@requires: PyRAF
@requires: matplotlib
@todo: For improved median filter, one could use scipy.ndimage.filters.median_filter
'''
import matplotlib
matplotlib.use('PDF')
matplotlib.rcParams['legend.fontsize'] = 9
import pyfits as PF
import numpy as N
import pylab as PL
import scipy.signal as SS
import scipy.interpolate as I
import scipy.optimize as O
import glob as G
import datetime as D
import pyraf
from pyraf.iraf import stsdas,hst_calib,stis
from matplotlib import cm
from matplotlib.patches import Circle
import math, os, os.path, sys, shutil
__author__ = 'Sami-Matias Niemi'
__version__ = '0.5'
class Findfiles:
'''
Finds all files that are suitable for making a STIS CCD
    spectroscopy flat field. Searches the input directory for
    all files that match the extension.
'''
def __init__(self, input, output, extension = '_raw'):
'''
Note that the detector (CCD) and optical element (G430M)
has been hard coded to the self.obsmode dictionary.
@param input: an input directory
@param output: an output directory
        @param extension: an extension that is used to identify files
'''
self.input = input
self.output = output
self.extension = extension
self.obsmode = {'DETECTOR' : 'CCD', 'OPT_ELEM' : 'G430M'}
def obsMode(self, filelist):
'''
Checks that the observation mode matches.
Uses the hard coded self.obsmode dictionary.
@param filelist: a list of file names to be tested.
@return: a list of file names that are of right obsmode.
'''
ok = True
out = []
for file in filelist:
hdr = PF.open(file)[0].header
for key in self.obsmode:
if self.obsmode[key] != hdr[key]:
ok = False
if ok:
out.append(file)
return out
def gain(self, filelist, value):
'''
Checks that gain equals the given value.
@param filelist: a list of file names to be tested
@param value: a gain value that the gain have to match
@return: a list of file names that match the gain value
'''
out = []
for file in filelist:
hdr = PF.open(self.input + file)[0].header
if hdr['CCDGAIN'] == value:
out.append(file)
return out
def apertureFilter(self, filelist, aperture = '50CCD', filter = 'Clear'):
'''
Checks that the aperture and filer matches the given ones.
@param filelist: a list of file names to be tested.
@param aperture: an aperture that the tested file must match.
@param filter: a filter that the tested file must match.
@return: a list of file names that match the aperture and filter.
'''
#another possibility could be:
#APER_FOV= '50x50 ' / aperture field of view
out = []
for file in filelist:
hdr = PF.open(self.input + file)[0].header
if hdr['APERTURE'].strip() == aperture and \
hdr['FILTER'].strip() == filter:
out.append(file)
return out
def cenwave(self, filelist, cenwave = 5216):
'''
Checks that the central wavelength matches.
@param filelist: a list of file names to be tested.
@param cenwave: a central wavelength that the test file must match.
        @return: a list of file names that match the central wavelength.
'''
out = []
for file in filelist:
hdr = PF.open(self.input + file)[0].header
if hdr['CENWAVE'] == cenwave:
out.append(file)
return out
def slitwheelPos(self, filelist, positions, nominal, tolerance = 1):
'''
Finds and matches all slit wheel positions present in files listed in
the filelist variable.
@param filelist: a list of file names to be tested.
@param positions: a list of positions that are used for matching.
@param nominal: the nominal slit wheel position.
@param tolerance: tolerance of slit wheel steps.
@return: dictionary of slit wheel positions and file names
'''
#find all OSWABPS
tmp = []
pos = {}
for file in filelist:
hdr1 = PF.open(self.input + file)[1].header
tmp.append([file, hdr1['OSWABSP']])
for a in positions:
pos[a] = [line[0] for line in tmp if line[1] > nominal + a - tolerance and line[1] < nominal + a + tolerance]
return pos
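    # Hedged usage sketch (editor's illustration; the offsets and nominal position are
    # hypothetical):
    #   pos = finder.slitwheelPos(files, positions=[-3, 0, 3], nominal=2150)
    # returns a dict keyed by offset, e.g. {-3: [...], 0: [...], 3: [...]}, where each
    # list holds the files whose OSWABSP lies within +/- tolerance of nominal + offset.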
def writeToASCIIFile(self, data, outputfile, header = ''):
'''
Writes file lists to an ASCII file. Each line contains one filename.
@param data: data that is written to the ascii file.
@param outputfile: the name of the output file.
@param header: header that is included to the beginning of the ascii file.
'''
        try:
            file = open(self.input + outputfile, 'w')
        except:
            print 'Problem while opening file %s' % outputfile
            return
        try:
            file.write(header)
            for line in data:
                file.write(line+'\n')
            file.close()
        except: pass
class PrepareFiles:
'''
Prepares files; modified header keywords.
'''
def __init__(self, input, output):
'''
Note that switches dictionary has been hard coded.
It currently only changes the CRCORR keyword.
@param input: an input directory
@param output: an output directory
'''
self.input = input
self.output = output
self.switches = {'CRCORR' : 'PERFORM'}
def changeKeywords(self, filelist):
'''
Modifies header keywords of FITS files.
Switches self.switches keywords.
@param filelist: a list of filenames
@todo: This will now crash if the keyword is not present...
'''
for file in filelist:
fh = PF.open(file, 'update')
hdr0 = fh[0].header
for key in self.switches:
hdr0[key] = self.switches[key]
fh.close()
class MakeFlat:
'''
This class can be used to generate a pixel-to-pixel flat field
from given data files.
'''
def __init__(self, input, output):
'''
@param input: an input directory
@param output: an output directory
'''
self.input = input
self.output = output
def _plot50(self, flat, xcen, ycen, rad, fname):
'''
This function can be used to plot flat file images where
dust motes have been circled. Will save the output figure
to fname.pdf.
@param flat: flat field array
@param xcen: a list of x central coordinates for dust motes
@param ycen: a list of y central coordinates for dust motes
@param rad: a list of radius for dust motest
'''
ax = PL.subplot(111)
b = PL.gca()
#P.title(fname)
ims = ax.imshow(flat, origin='lower',
cmap = cm.gray,
interpolation = None,
vmin = 0.98,
vmax = 1.02)
cb = PL.colorbar(ims, orientation='vertical')
cb.set_label('Normalized Counts')
#loop over xcen and ycen and ratio and draw circles
for x, y, r in zip(xcen, ycen, rad):
cir = Circle((x,y), radius=r, fc = 'none', ec = 'r')
b.add_patch(cir)
#PL.show()
PL.savefig(fname + '.pdf')
PL.close()
def _plotFit(self, xpx, img, fit, good, i, xnod, ynod, fitynods, tmp, file, column = True):
'''
Plot Spline fit and the data for comparison for ith row or column.
Will also plot median filtered data for columns. The plot is saved
to file_tmp_Fit.pdf
@param xpx: x pixels
@param img: y (counts) values
@param fit: fitted y values
@param good: mask that specifies good pixels
@param i: the ith row or column
@param xnod: a list of x node positions
@param ynod: a list of original y node positions
@param fitynods: a list of fitted y node positions
@param tmp: median filtered count values
@param file: the name of the file that is being plotted
@param column: Boolean to define whether this is a column or row fit plot
'''
fig = PL.figure()
#P.title(file)
left, width = 0.1, 0.8
rect1 = [left, 0.3, width, 0.65]
rect2 = [left, 0.1, width, 0.2]
ax1 = fig.add_axes(rect2) #left, bottom, width, height
ax2 = fig.add_axes(rect1)
if column: ax2.plot(xpx, img[:,i], 'b-', label = 'Data', zorder = 1)
else: ax2.plot(xpx, img[i,:], 'b-', label = 'Data', zorder = 1)
if column: ax2.plot(xpx[good], tmp[good,i], 'y-', label='Median Filtered', zorder = 4)
if column: ax2.plot(xpx, fit[:,i], 'r-', label = 'Spline Fit', lw = 1.5, zorder = 6)
else: ax2.plot(xpx, fit[i,:], 'r-', label = 'Spline Fit', lw = 1.5, zorder = 6)
ax2.plot(xpx, self._cspline(xnod, ynod, xpx), 'm-', label = 'Original Spline', zorder = 5)
ax2.plot(xnod, ynod, 'ms', label = 'Original Nods', zorder = 7)
ax2.plot(xnod, fitynods, 'ro', label = 'Fitted Nods', zorder = 7)
if column: ax1.plot(xpx, fit[:,i]/img[:,i], 'r-', zorder = 6)
else: ax1.plot(xpx, fit[i,:]/img[i,:], 'r-', zorder = 6)
ax1.axhline(1. , zorder = 3)
ax1.set_xlim(-2,1025)
ax2.set_xlim(-2,1025)
ax2.set_ylim(0.98*N.min(tmp[good,i]), 1.02*N.max(tmp[good,i]))
ax1.set_ylim(0.95, 1.05)
ax1.set_ylim(0.95, 1.05)
ax2.set_xticklabels([])
ax2.set_yticks(ax2.get_yticks()[1:])
#ax1.set_yticks(ax1.get_yticks()[::2])
ax1.set_ylabel('Fit / Data')
ax2.set_ylabel('Counts')
if column: ax1.set_xlabel('Column %i Pixels' % i)
else: ax1.set_xlabel('Row %i Pixels' % i)
try:
ax2.legend(numpoints = 1, shadow = True, fancybox = True, loc = 'best')
except:
ax2.legend(loc = 'best')
if column: tmp = 'Column'
else: tmp = 'Row'
PL.savefig(file + '_%sFit.pdf' % tmp)
PL.close()
def _badspot(self, mask, opt_elem):
'''
Loads SMNdust.stis file that holds information about dust speck
locations and sizes. It will return a mask where all dust motes
        have been marked. The function can be used for both L and M-modes,
        as M-modes are shifted 2 pixels to the left and their radius is set
        to 16, except for the first two dust specks, which are smaller.
@summary: Adds dust motes to bad pixel mask.
@param mask: array of the same size as the image that is used to mask
areas such as dust motes that should not be used for fitting.
@param opt_elem: optical element that was used; L or M -mode
@return: updated mask, x_centre, y_centre, radius of dust specks
@todo: Rewrite; adapted from an IDL routine, thus, looks horrible
'''
s = N.shape(mask)
nx = s[1] - 1
ny = s[0] - 1 # max x & y pixel
#read dust file
dust = N.loadtxt('SMNdust.stis', comments='#')
#first 2 spots on spectrum & are small. cut size
rad = N.zeros(N.shape(dust)[0]) + 12
rad[0:2] -= 4
xcen = dust[:,0]
ycen = dust[:,1]
#; 99jun10 - shift stis medium disp spots left
if 'M' in opt_elem:
#shifting badspots to left
            print 'STIS Med resolution grating. Shifting badspots left.'
xcen -= 2
rad = rad*0 + 16
rad[0:2] -= 4 # 1st 2 spots on spectrum & are small. cut size
radsq = rad**2.
#this shuold be rewritten...
for i, a in enumerate(xcen):
for j, b in enumerate(rad):
#; 2*rad+1 lines total get some flags
dist = int(math.sqrt(radsq[i] - j**2.) + 0.5)
xmn = xcen[i] - dist
if xmn < 0: xmn = 0
xmx = xcen[i] + dist
if xmx > nx: xmx = nx
yrow = ycen[i] - j
if yrow < 0: yrow = 0
mask[yrow, xmn:xmx] = 0
yrow = ycen[i] + j
if yrow > ny: yrow = ny
mask[yrow, xmn:xmx] = 0
return mask, xcen, ycen, rad
def _fitfunc(self, x, ynodes):
'''
The function that is being fitted.
This can be changed to whatever function if needed.
Note that ynodes can then be a list of parameters.
k defines the order of the spline fitting, it is hard coded
to be 3, but could be changed if needed.
@param x: the x position where to evaluate the B-spline
@param ynodes: y position of the nodes
@return: 1-D evaluated B-spline value at x.
'''
return I.splev(x, I.splrep(self.xnodes, ynodes, k = 3))
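    # Editor's note on the evaluation above: I.splrep builds the knot/coefficient
    # representation of a cubic (k=3) B-spline through (self.xnodes, ynodes) and
    # I.splev evaluates that spline at x, so _fitfunc(x, ynodes) is the spline model
    # sampled at the requested positions.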
def _errfunc(self, ynodes, x, y):
'''
Error function; simply _fitfunction - ydata
@param ynodes: y position of the nodes to be evaluated
@param x: x positions where to evaluate the B-spline
@param y: y positions
@return: Spline evaluated y positions - ydata
'''
return self._fitfunc(x, ynodes) - y
def _splinefitScipy(self, x, y, ynodes, xnodes):
'''
Return the point which minimizes the sum of squares of M (non-linear)
equations in N unknowns given a starting estimate, x0, using a
modification of the Levenberg-Marquardt algorithm.
@param x:
@param y:
@param ynodes:
@param xnodes:
@return: fitted parameters, error/success message
'''
self.xnodes = xnodes
return O.leastsq(self._errfunc, ynodes, args=(x, y))
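    # Hedged usage sketch (editor's illustration; the arrays are hypothetical):
    #   fitynods, ier = self._splinefitScipy(xpx[good], counts[good], ynod, xnod)
    # O.leastsq adjusts the y-node values so that the spline through (xnod, fitted
    # y-nodes) minimizes the squared residuals returned by _errfunc.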
def _cspline(self, x, y, t):
'''
Uses interpolation to find the B-spline representation of 1-D curve.
@param x: x position
@param y: y position
@param t: position in which to evaluate the B-spline
@return: interpolated y position of the B-sline
'''
tck = I.splrep(x, y)
y2 = I.splev(t, tck)
return y2
def _writeCombinedFits(self, flat, err, dq, head, template, raws, output):
'''
Writes the combined flat field FITS to a file.
Uses old reference files as a template.
@param flat: flat field array
@param err: error array
@param dq: data quality array
@param head: header dictionary
@param template: template file to be used
@param raws: list of raw file names that were used to create the flat
@param output: name of the output file
'''
#output = self.output + output
fh = PF.open(template)
fh[1].data = flat.astype(N.float32)
fh[2].data = err.astype(N.float32)
fh[3].data = dq.astype(N.int16)
hdu0 = fh[0].header
hdr1 = fh[1].header
hdr2 = fh[2].header
hdr3 = fh[3].header
#change 0th header keywords DESCRIP to have right length
for key in head:
if key == 'DESCRIP':
#weird CDBS rule of 67 chars...
l = len(head[key])
if l <= 67:
pad = '-'*(67 - l)
head[key] = head[key] + pad
else:
print 'Problem with DESCRIP keyword, will truncate'
                    head[key] = head[key][:67]
hdu0.update(key, head[key])
#hardcoded changes
hdu0.update('ORIGIN', 'PyFITS %s version' % PF.__version__)
hdr1.update('ORIGIN', 'PyFITS %s version' % PF.__version__)
hdr2.update('ORIGIN', 'PyFITS %s version' % PF.__version__)
hdr3.update('ORIGIN', 'PyFITS %s version' % PF.__version__)
hdu0.update('FILENAME', output[len(self.output):])
#the first one makes a default FITS comment, but CDBS wants it with equal sign...
#hdu0.update('COMMENT', 'Reference file was created by S.-M. Niemi, %s.' % D.datetime.today().strftime('%B %Y'))
del hdu0['COMMENT']
hdu0.add_comment('= Reference file was created by S.-M. Niemi, %s.' % D.datetime.today().strftime('%B %Y'),
before = 'ORIGIN')
#fix the date
date = D.datetime.today().strftime('%d/%m/%y')
hdu0.update('FITSDATE', date)
hdu0.update('DATE', date)
hdr1.update('DATE', date)
hdr2.update('DATE', date)
hdr3.update('DATE', date)
#fix history
del hdu0['HISTORY']
del hdu0['COMMENTS']
hdu0.add_history('Created on %s, using the' % D.datetime.today().strftime('%B %d %Y'))
hdu0.add_history('Python script STISCCDSpectroscopyFlat.py.')
hdu0.add_history('')
hdu0.add_history('The flat field for spectral modes is independent of')
hdu0.add_history('wavelength but does change with time. For this reason,')
hdu0.add_history('all the L-mode gratings have the same flat field, and all')
hdu0.add_history('the M-mode gratings have the same flat field. An average')
        hdu0.add_history('flat is produced from data with one to four million')
        hdu0.add_history('electrons/pixel and is applicable to all CCD first order')
        hdu0.add_history('modes after the dust motes are replaced by those mote regions')
hdu0.add_history('from separate low or medium dispersion flats. Application of')
hdu0.add_history('flat to spectra of standard stars produces an rms residual')
hdu0.add_history('noise level as good as ~0.3%, which is comparable to the')
hdu0.add_history('residual noise achievable with no flat.')
hdu0.add_history('New CCD p-flats constructed primarily from G430M spectral')
hdu0.add_history('flats from program 11852. In L-mode flats, those regions')
hdu0.add_history('affected by dust motes were replaced by data from')
hdu0.add_history('the older cycle 7 l-flat file k2910262o_pfl.fits.')#k2910262o_pfl.fits
hdu0.add_history('The following input files were used:')
for file in raws:
hdu0.add_history(file[len(self.output):])
#make the file
fh.writeto(output)
fh.close()
print '%s has been written...' % output
def _writeMakeFITS(self, file, flat, err, eps, sm, i, output):
'''
Writes the output to a FITS file. This is an intermediate product
so the header is not CDBS compatible.
@param file: the name of the file that is being used as a template
@param flat: flat field array
@param err: error array
@param eps: eps array
@param sm: total number of images
@param i: total number of observations
@param output: name of the output file name
'''
output = self.output + output
ifd = PF.open(file)
orghdr0 = ifd[0].header
#ofd = PF.HDUList(PF.PrimaryHDU(header = orghdr0))
ofd = PF.HDUList(PF.PrimaryHDU())
ifd.close()
#create a new primary header with flat data
#hdu = PF.ImageHDU(data=flat, header=ifd[1].header, name='IMAGE')
hdu = PF.ImageHDU(data=flat, name='IMAGE')
#update('target', 'NGC1234', 'target name')
hdu.header.update('OPT_ELEM', orghdr0['OPT_ELEM'])
#hdu.header.update('MODE_ID', orghdr0['MODE_ID'])
hdu.header.update('APERTURE', orghdr0['APERTURE'])
hdu.header.update('CENWAVE', orghdr0['CENWAVE'])
hdu.header.update('DETECTOR', orghdr0['DETECTOR'])
hdu.header.update('CCDGAIN', orghdr0['CCDGAIN'])
hdu.header.update('TOTIMG', sm)
hdu.header.update('TOTOBS', i)
#add history section
hdu.header.add_history('Flat Written by STISCCDSpectrscopyFlat.py')
hdu.header.add_history('at %s UTC.' % D.datetime.utcnow())
#checks that the header follows rules
hdu.verify('fix')
#appends to the HDUlist
ofd.append(hdu)
#create a new extension header with error array
#hdu2 = PF.ImageHDU(data=err, header=ifd[2].header, name='ERR')
hdu2 = PF.ImageHDU(data=err, name='ERR')
hdu2.header.update('OPT_ELEM', orghdr0['OPT_ELEM'])
#hdu2.header.update('MODE_ID', orghdr0['MODE_ID'])
hdu2.header.update('APERTURE', orghdr0['APERTURE'])
hdu2.header.update('CENWAVE', orghdr0['CENWAVE'])
hdu2.header.update('DETECTOR', orghdr0['DETECTOR'])
hdu2.header.update('CCDGAIN', orghdr0['CCDGAIN'])
hdu2.header.update('TOTIMG', sm)
hdu2.header.update('TOTOBS', i)
#add history section
hdu2.header.add_history('Flat Written by STISCCDSpectrscopyFlat.py')
hdu2.header.add_history('at %s UTC.' % D.datetime.utcnow())
#checks that the header follows rules
hdu2.verify('fix')
#appends to the HDUlist
ofd.append(hdu2)
#create a new extension header with eps array
#hdu3 = PF.ImageHDU(data=eps, header=ifd[3].header, name='EPS')
hdu3 = PF.ImageHDU(data=eps, name='EPS')
hdu3.header.update('OPT_ELEM', orghdr0['OPT_ELEM'])
#hdu3.header.update('MODE_ID', orghdr0['MODE_ID'])
hdu3.header.update('APERTURE', orghdr0['APERTURE'])
hdu3.header.update('CENWAVE', orghdr0['CENWAVE'])
hdu3.header.update('DETECTOR', orghdr0['DETECTOR'])
hdu3.header.update('CCDGAIN', orghdr0['CCDGAIN'])
hdu3.header.update('TOTIMG', sm)
hdu3.header.update('TOTOBS', i)
#add history section
hdu3.header.add_history('Flat Written by STISCCDSpectrscopyFlat.py')
hdu3.header.add_history('at %s UTC.' % D.datetime.utcnow())
#checks that the header follows rules
hdu3.verify('fix')
#appends to the HDUlist
ofd.append(hdu3)
#writes the file
try:
ofd.writeto(output + '.fits')
except:
print 'File %s exists...' % (output + '.fits')
print 'Will write to %s instead!' % (output + 'SMN.fits')
ofd.writeto(output + 'SMN.fits')
def _doStats(self, data, mode):
'''
Calculates and prints out some basic statistics from the given data.
@param data: data array from which the statistics is being calculated.
@param mode: a string related to the mode (L/M)s
'''
#whole data
std = N.std(data)
mean = N.mean(data - 1.)
rms = N.sqrt(mean**2 + std**2)
#100 x 100 pixels at the centre of the data array
sh = N.shape(data)
x , y = sh[0]/2, sh[1]/2
xmin = x - 50 if x - 50 > 0 else 0
xmax = x + 50 if x + 50 <= sh[0] else sh[0]
ymin = y - 50 if y - 50 > 0 else 0
ymax = y + 50 if y + 50 <= sh[1] else sh[1]
ds = data[ymin:ymax, xmin:xmax]
stds = N.std(ds)
means = N.mean(ds - 1)
rmss = N.sqrt(means**2 + stds**2)
print '-'*15 + 'Statistics of %s' % mode + '-'*15
print '%15s'*4 % ('ARRAY', 'MEAN', 'STDEV', 'RMS Noise')
print '%15s%15.5f%16.6f%14.6f' % ('Full', mean + 1., std, rms)
print '%15s%15.5f%16.6f%14.6f' % ('Centre', means + 1., stds, rmss)
def _fitflat(self, hdr, img, mask, nodes, col = False, nomed = False, file = 'name'):
'''
Creates spline fitted flat field array and masked image.
Uses img for the data and nodes for the number of nodes.
This function can be used to fit both column and row direction.
This choice is controlled with the boolean col.
@param hdr: header
@param img: flat field image
@param mask: mask that is being applied and updated
@param nodes: number of nodes being used for the spline fits
@param col: boolean, column (True) or row (False, default) fit
@param nomed: boolean, no median filtering (True) or median filtering (False, default)
@param file: the name of the plot file
@return: Spline fitted data, img / fit, mask
'''
s = N.shape(img)
nx = s[0]
ny = s[1]
det = hdr['DETECTOR'].strip()
#set up positions of spline nodes
if col:
xpx = N.arange(ny, dtype = int)
xnod = N.arange(nodes)*(ny-1.)/(nodes-1.) # spaced nodes incl ends
else:
xpx = N.arange(nx, dtype = int)
xnod = N.arange(nodes)*(nx-1.) / (nodes-1.) #spaced nodes incl ends
#using array copy
fit = img.copy() # unfit lines will be unit flats
tmp = img.copy()
# initialize for 1-d median filter
bad = N.where((mask == 0) & (fit == 0.0))
nbad = len(bad[0])
if nbad > 0: fit[bad] = 1.0 #to avoid 0/0 in img/fit for all bad rows
if col == False:
#add extra pt half way btwn first and last pts at ends
dl = xnod[1] - xnod[0]
xnod = N.insert(xnod, 1, dl/2.)
xnod = N.insert(xnod, -1, (xnod[-1] - dl/2.))
if nomed == False:
print 'MEDIAN filtering for row fitting!'
tmp = SS.medfilt2d(img, 11) # default for CCDs
loop = ny - 1
if col: loop = nx - 1
lenfit = nx
if col:
lenfit = ny
nskip = 0
for i in range(loop+1):
if col: #column fitting
good = mask[:,i] > 0
ngd = len(mask[good])
if float(ngd)/lenfit < 0.55:
mask[:,i] = -1
nskip += 1
continue
if nomed == False:
tmp[good, i] = SS.medfilt(img[good, i], 13)
#this is significantly better initial guess than the one below
ynod = N.array([N.mean(tmp[x-5:x+5, i]) for x in xnod])
ynod[N.isnan(ynod)] = N.mean(tmp[good, i])
#ynod = xnod*0 + N.mean(tmp[good, i])
fitynods, _t = self._splinefitScipy(xpx[good], tmp[good, i], ynod, xnod)
fit[:,i] = self._cspline(xnod, fitynods, xpx)
if i == 650: self._plotFit(xpx, img, fit, good, i, xnod, ynod, fitynods, tmp, file)
else: #row fitting
good = mask[i,:] > 0
ngd = len(mask[good])
if float(ngd)/lenfit < 0.55:
mask[i,:] = -1
nskip += 1
continue
#reallocate xnodes esp for STIS tung 52x2 w/ 1st 215 px ignored
xnod = N.arange(nodes)*(N.max(xpx[good])-xpx[good][0])/(nodes-1) + xpx[good][0]
ynod = N.array([N.mean(tmp[i, x-5:x+5]) for x in xnod])
ynod[N.isnan(ynod)] = N.mean(tmp[i, good])
fitynods, _t = self._splinefitScipy(xpx[good], tmp[i, good], ynod, xnod)
fit[i,:] = self._cspline(xnod, fitynods, xpx)
if i == 600: self._plotFit(xpx, img, fit, good, i, xnod, ynod, fitynods, tmp, file, column = False)
print 'FITFLAT with %i NODES' % len(xnod)
print '%i fits were skipped...' % nskip
bad = N.where(fit == 0)
if len(bad[0]) > 0:
print bad
print 'Problem at fit points, will replace with 1.0'
fit[bad] = 1.0 #to avoid 0/0 in img/fit
flat = img/fit
#self._plot50(flat, (0,0), (0,0), (0,0), 'foo')
return fit, flat, mask
def crreject(self, filelist):
'''
Runs CalSTIS for each file in the file list.
Uses default settings.
@param filelist: list of files being run through CalSTIS.
'''
for file in filelist:
stis.calstis(file)
def combine(self, filelist, output, crsigmas = '20'):
'''
Combines all files in filelist using ocrreject PyRAF task.
@param filelist: filelist being processed
@param output: output list
@param crsigmas: cosmic ray rejection sigma clipping value
'''
stis.ocrreject(filelist, output = output, crsigmas = crsigmas)
def make50CDD(self, files, colnodes = 25, rownodes = 13):
'''
Makes a flat field from all files that have been taken with the clear
aperture (50CCD). Each input file will produce an output FITS file.
Also makes plots from the fitted and masked data that can be used to
check the results for any failed spline fits, etc.
@param files: a list of file names that are being used
@param colnodes: the number of nodes used for column fits (default = 25)
@param rownodes: the number of nodes used for row fits (default = 13)
@summary: Creates a flat field
@todo: rewrite away from IDL
'''
files = [self.output + x for x in files]
for i, file in enumerate(files):
#open file and pull out required information
fh = PF.open(file)
dcr = fh[1].data
err = fh[2].data
eps = fh[3].data
hdr0 = fh[0].header
hdr1 = fh[1].header
fh.close()
lamp = hdr0['SCLAMP'].strip()
aper = hdr0['APERTURE'].strip()
mode = hdr0['OPT_ELEM'].strip()
gain = 'g' + str(hdr0['CCDGAIN'])
if 'M' in mode:
mode += '-' + str(hdr0['CENWAVE']) + '_'
if aper == '50CCD': mode += '50CCD'
eps = 0*eps
nused = hdr1['NCOMBINE']
sm = N.max(nused)
ny, nx = N.shape(dcr)
onemsk = N.ones((nx,ny), dtype = N.int) #mask of where flat = 1
ignmsk = N.ones((nx,ny), dtype = N.int) #mask of pts to keep, but ignore in flt
#bad spots to ignmsk
ignmsk, xcen, ycen, rad = self._badspot(ignmsk, hdr0['OPT_ELEM'].strip())
onemsk[:,0] = 0 # general STIS bad 1st and last col
onemsk[:,1023] = 0
onemsk[994:1024,:] = 0 # "vignetted" at top indep of slit pos.
onemsk[0,:] = 0 # first row
onemsk[0:5,253:259] = 0 # 99feb4 hole made vert stripe at bott.
onemsk[14:16,768:770] = 0 # ... for D2 G230LB, 50CCD
if aper== '50CCD':
onemsk[0:15, :] = 0
cenlo = 280
cenhi = 742 # orig fid centers
if 'M' in mode:
if aper == '50CCD':
onemsk[0:38, :] = 0
onemsk[975:1024, :] = 0 # top clipped
cenlo = 304
cenhi = 766
#fiducial masks
if aper != '50CCD':
onemsk[cenlo-14:cenlo+15, :] = 0
onemsk[cenhi-19:cenhi+20, :] = 0
err[onemsk == 0] = 0 # err=0 means do not use data
eps[onemsk == 0] = 512 # all px rejected flag
onemsk[eps > 150] = 0 # keep warm px=150
#fit in col dir first because of glints along L&R edges for 50ccd D2-5216
cmask = onemsk*ignmsk
PL.imshow(cmask, origin='lower', interpolation = None)
PL.savefig(file[:-5]+'_cmask.pdf')
PL.close()
colfit, flat, cmask = self._fitflat(hdr0, dcr, cmask, colnodes, col = True, file = file[:-5])
if lamp == 'TUNGSTEN' and aper == '52X2':
print ' Cannot do row fit for ', lamp, aper
fit = colfit.copy()
rmask = cmask.copy()
else:
#iterate w/ row fits for no em lines
#ignmsk areas filled w/ the fits
rmask = onemsk.copy()
if mode == 'G430M-5216_50CCD':
print 'Omit rowfit at ends of ', mode
rmask[:,0:61] = 0
rmask[:,930:1024] = 0
fit, flat, rmask = self._fitflat(hdr0, colfit, rmask, rownodes, nomed = True, file = file[:-5])
if mode == 'G430M-5216_50CCD':
fit[:, 0:61] = colfit[:, 0:61] # bumps at ends of rows
fit[:, 930:1024] = colfit[:, 930:1024]
flat = dcr/fit
indx = N.where((cmask == -1) & (rmask == -1)) # neither row nor col fit
if len(indx[0]) > 0: err[indx] = 0 #err=0 means do not use data
if len(indx[0]) > 0: eps[indx] = 512 #all px rejected flag
flat[onemsk == 0] = 1.0
err = err/fit # error in units of flat
#remove vertical ringing
yflat = N.ones(1024)
for iy in range(1024):
inc = cmask[iy, :] == 1
if len(flat[iy, inc]) > 0: yflat[iy] = N.mean(flat[iy, inc])
yflatm = SS.medfilt(yflat, 5)
for iy in range(1024): flat[iy,:] = flat[iy,:]/yflatm[iy]
if len(flat[N.isnan(flat)]) > 0:
print 'ERROR: there are NaNs in the flat:', flat[N.isnan(flat)]
#make plot
self._plot50(flat, xcen, ycen, rad, file[:-5])
#create a FITS file
fname=['ppG430M_50CCD_gain%s_flat' % gain[1:],'ppG430M_50CCD_gain%s_flat' % gain[1:]]
self._writeMakeFITS(file, flat, err, eps, sm, i, fname[i])
def make52X2(self, files, slitpos, colnodes = 13, rownodes = 13):
'''
Makes a flat field from all files that have been taken with a long slit
in the light path (52X2). Each input file will produce an output FITS file.
Also makes plots from the fitted and masked data that can be used to
check the results for any failed spline fits, etc.
@param files: a list of file names that are being used
@param slitpos: a list of slit positions that correspond to each file
@param colnodes: the number of nodes used for column fits (default = 13)
@param rownodes: the number of nodes used for row fits (default = 13)
@summary: Creates a flat field
@todo: rewrite away from IDL. Get rid of the hard-coded file names
'''
files = [self.output + x for x in files]
for i, file in enumerate(files):
#open file and pull out required information
fh = PF.open(file)
dcr = fh[1].data
err = fh[2].data
eps = fh[3].data
hdr0 = fh[0].header
hdr1 = fh[1].header
fh.close()
lamp = hdr0['SCLAMP'].strip()
aper = hdr0['APERTURE'].strip()
mode = hdr0['OPT_ELEM'].strip()
gain = 'g' + str(hdr0['CCDGAIN'])
if 'M' in mode:
mode += '-' + str(hdr0['CENWAVE']) + '_'
if aper == '50CCD': mode += '50CCD'
eps = 0*eps
nused = hdr1['NCOMBINE']
#nused = hdr1['TOTIMG']
sm = N.max(nused)
sum='sm%i' % (sm)
print 'Processing file %s (%s)' % (file, mode+gain+sum)
ny, nx = N.shape(dcr)
onemsk = N.ones((nx,ny), dtype = N.int) #mask of where flat = 1
ignmsk = N.ones((nx,ny), dtype = N.int) #mask of pts to keep, but ignore in flt
#bad spots to ignmsk
ignmsk, xcen, ycen, rad = self._badspot(ignmsk, hdr0['OPT_ELEM'].strip())
onemsk[:,0] = 0 # general STIS bad 1st and last col
onemsk[:,1023] = 0
onemsk[1013:1024, :] = 0 # "vignetted" at top indep of slit pos.
onemsk[0,:] = 0 # first row
onemsk[0:5,253:259] = 0 # 99feb4 hole made vert stripe at bott.
onemsk[14:16,768:770] = 0 # ... for D2 G230LB, 50CCD
cenlo = 280 - 0.0141*slitpos[i]
cenhi = 742 - 0.0141*slitpos[i] # orig fid centers
if 'M' in mode:
cenlo = 286 - 0.0141*slitpos[i]
cenhi = 748 - 0.0141*slitpos[i]
botedge = 22 - 0.0141*slitpos[i]
topedge = 1042 - 0.0141*slitpos[i]
if botedge < 1: botedge = 1
if topedge > ny - 1: topedge = ny - 1
if '5216' in mode:
onemsk[0:botedge+1, :] = 0 # bad at bottom
onemsk[topedge:ny, :] = 1 # top OK
cenlo = 304 - 0.0141*slitpos[i]
cenhi = 766 - 0.0141*slitpos[i]
if '6094' in mode:
onemsk[1013:1024, :] = 0
cenlo = 259 - 0.0141*slitpos[i]
cenhi = 721 - 0.0141*slitpos[i]
#print file, cenlo, cenhi
cenlo += 18.
cenhi += 18.
#fiducial masks
if '52' in aper:
onemsk[cenlo-14:cenlo+15, :] = 0
onemsk[cenhi-19:cenhi+20, :] = 0
onemsk[0:botedge+1, :] = 0
onemsk[topedge:ny, :] = 0
err[onemsk == 0] = 0 # err=0 means do not use data
eps[onemsk == 0] = 512 # all px rejected flag
onemsk[eps > 150] = 0 # keep warm px=150
#fit in col dir first because of glints along L&R edges for 50ccd D2-5216
cmask = onemsk*ignmsk
PL.imshow(cmask, origin='lower', interpolation = None)
PL.savefig(file[:-5]+'_cmask.pdf')
PL.close()
#PL.imshow(onemsk, origin='lower', interpolation = None)
#PL.savefig(file[:-5]+'_onemask.pdf')
#PL.close()
#PL.imshow(ignmsk, origin='lower', interpolation = None)
#PL.savefig(file[:-5]+'_ignmask.pdf')
#PL.close()
colfit, flat, cmask = self._fitflat(hdr0, dcr, cmask, colnodes, col = True, file = file[:-5])
if lamp == 'TUNGSTEN' and aper == '52X2':
print ' Cannot do row fit for ', lamp, aper
fit = colfit.copy()
rmask = cmask.copy()
else:
#iterate w/ row fits for no em lines
#ignmsk areas filled w/ the fits
rmask = onemsk.copy()
if mode == 'G430M-5216_50CCD':
print 'Omit rowfit at ends of ', mode
rmask[:,0:61] = 0
rmask[:,930:1024] = 0
fit, flat, rmask = self._fitflat(hdr0, colfit, rmask, rownodes, nomed = True, file = file[:-5])
if mode == 'G430M-5216_50CCD':
fit[:, 0:61] = colfit[:, 0:61] # bumps at ends of rows
fit[:, 930:1024] = colfit[:, 930:1024]
flat = dcr/fit
indx = N.where((cmask == -1) & (rmask == -1)) # neither row nor col fit
if len(indx[0]) > 0: err[indx] = 0 #err=0 means do not use data
if len(indx[0]) > 0: eps[indx] = 512 #all px rejected flag
flat[onemsk == 0] = 1.0
err = err/fit # error in units of flat
#remove vertical ringing
yflat = N.ones(1024)
for iy in range(1024):
inc = cmask[iy, :] == 1
if len(flat[iy, inc]) > 0:
yflat[iy] = N.mean(flat[iy, inc])
flat[iy,:] = flat[iy,:]/yflat[iy]
if len(flat[N.isnan(flat)]) > 0:
print 'ERROR: there are NaNs in the flat:', flat[N.isnan(flat)]
#make plot
self._plot50(flat, xcen, ycen, rad, file[:-5])
#create a FITS file
fname=['pG430M_52x2_gain4m7300_flat','pG430M_52x2_gain4m3640_flat',
'pG430M_52x2_gain4p0000_flat','pG430M_52x2_gain4p3640_flat',
'pG430M_52x2_gain4p7300_flat']
self._writeMakeFITS(file, flat, err, eps, sm, i, fname[i])
def CombineFinalFlatS(self, list, headerl, headerm, raws):
'''
Combines all individual images to form a single combined flat field for
each given mode. Handles flux, error and data quality arrays. Images are
combined using per-pixel weights that are calculated from the error
arrays. Only the pixels that have not been flagged in the data quality
array are taken into account.
This function will also call other functions to calculate some basic
statistics and to create some plots from the combined image.
For L-modes the dust motes have to be pasted in from another file. Will use
the one that is in the CDBS: /grp/hst/cdbs/oref/k2910262o_pfl.fits.
@param list: a list of files to be combined
@param headerl: a header for the l-mode combined image
@param headerm: a header for the m-mode combined image
@param raws: list of raw file names that were used to create the flat
'''
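#Per-pixel combination used below (my summary of the code; inverse-variance
#weighting over the images whose DQ flag is 0 and whose error is > 0):
#  flux  = sum_i(f_i/e_i**2) / sum_i(1/e_i**2)
#  error = N.sqrt(1. / sum_i(1/e_i**2))
#with an additional clip of siglim times the largest input error about the
#median when more than two images contribute.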
#hard coded value
siglim = 5
nimage = len(list)
#make zero arrays
added_flux = N.zeros((1024, 1024))
added_error = N.zeros((1024, 1024))
added_dq = N.zeros((1024, 1024))
weights = N.zeros((1024, 1024))
fluxall = N.zeros((1024, 1024, nimage))
errorall = N.zeros((1024, 1024, nimage))
flagsall = N.zeros((1024, 1024, nimage))
for i, file in enumerate(list):
fh = PF.open(file)
fluxin = fh[1].data
errorin = fh[2].data
flagsin = fh[3].data
fh.close()
if i > 1: flagsin[225:801, 180] = flagsin[225:801, 180] + 1024
fluxall[:,:,i] = fluxin.copy()
errorall[:,:,i] = errorin.copy()
flagsall[:,:,i] = flagsin.copy()
for ix in range(1024):
for iy in range(1024):
added_dq[iy, ix] = N.min(flagsall[iy, ix, :])
ig = N.where((flagsall[iy, ix, :] == 0) & (errorall[iy, ix,:] > 0.0))
if len(ig[0]) <= 0:
added_flux[iy, ix] = 1.
added_error[iy,ix] = 0.1
elif len(ig[0]) <= 2:
added_flux[iy,ix] = N.sum(fluxall[iy,ix,ig]/errorall[iy,ix,ig]**2)/N.sum(1./errorall[iy,ix,ig]**2)
added_error[iy,ix] = N.sqrt(1./N.sum(1./errorall[iy,ix,ig]**2))
else:
center = N.median(fluxall[iy,ix,ig])
std = N.max(errorall[iy,ix,ig])
igg = N.where(N.abs(fluxall[iy,ix,ig] - center) < siglim*std)
if len(igg[0]) >= 1:
cov = N.where(((flagsall[iy, ix, :] == 0) & (errorall[iy, ix,:] > 0.0)) & (N.abs(fluxall[iy,ix,:] - center) < siglim*std))
added_flux[iy,ix] = N.sum(fluxall[iy,ix,cov]/errorall[iy,ix,cov]**2)/N.sum(1./errorall[iy,ix,cov]**2)
added_error[iy,ix] = N.sqrt(1./N.sum(1./errorall[iy,ix,cov]**2))
else:
added_flux[iy,ix] = center
added_error[iy,ix] = std
#fix spurious errors
added_error[added_error > 0.1] = 0.1
added_flux[added_flux < 0.0] = 1.
added_dq[added_flux < 0.0] = 1024
#low and med res copies
flat_l = added_flux.copy()
flat_m = added_flux.copy()
err_l = added_error.copy()
err_m = added_error.copy()
dq_l = added_dq.copy()
dq_m = added_dq.copy()
# flag and patch dust motes (dq=1024)
m_mote = N.ones((1024,1024), dtype = N.int)
l_mote = N.ones((1024,1024), dtype = N.int)
l_mote, xlcen, ylcen, radl = self._badspot(l_mote, 'G430L')
m_mote, xmcen, ymcen, radm = self._badspot(m_mote, 'G430M')
# since basic flats are from m mode data, have to also replace area covered by
# m-mode motes with "good" l mode data to produce l mode pflat.
l_mote_ext = l_mote*m_mote
# flag dust motes with 1024
dq_l[l_mote_ext == 0] = dq_l[l_mote_ext == 0] + 1024
dq_m[m_mote == 0] = dq_m[m_mote == 0] + 1024
# leave m-mode motes alone, but paste into l-mode motes from another file
templ_f = PF.open('/grp/hst/cdbs/oref/k2910262o_pfl.fits')[1].data
templ_e = PF.open('/grp/hst/cdbs/oref/k2910262o_pfl.fits')[2].data
l_mote_loc = N.where(l_mote_ext == 0)
flat_l[l_mote_loc] = templ_f[l_mote_loc]
err_l[l_mote_loc] = templ_e[l_mote_loc]
# write individual extensions of low and high disp file
templ = '/grp/hst/cdbs/oref/n491401ho_pfl.fits' #
tempm = '/grp/hst/cdbs/oref/n491401ko_pfl.fits' #'n491401eo_pfl.fits'
self._writeCombinedFits(flat_l, err_l, dq_l, headerl, templ, raws, self.output + 'coadd_comb_reject_l.fits')
self._writeCombinedFits(flat_m, err_m, dq_m, headerm, tempm, raws, self.output + 'coadd_comb_reject_m.fits')
#make some extra plots
self._plot50(flat_l, xlcen, ylcen, radl, 'coadd_comb_reject_l')
self._plot50(flat_m, xmcen, ymcen, radm, 'coadd_comb_reject_m')
#print out some information
self._doStats(flat_l, 'L-mode Flat')
self._doStats(flat_m, 'M-mode Flat')
def MakeCopies(self):
'''
Copies the newly created p-flat files over to a new directory and
renames them so that the file names are appropriate for CDBS
delivery.
'''
outdir = './final/'
try:
os.mkdir(outdir)
except:
print 'Final output directory exists'
pass
#shutil.copy(src, dst)
l = './out/coadd_comb_reject_l.fits'
m = './out/coadd_comb_reject_m.fits'
mfiles = ['g230mb_new_pfl.fits', 'g430m_new_pfl.fits', 'g750m_new_pfl.fits']
lfiles = ['g230lb_new_pfl.fits', 'g430l_new_pfl.fits', 'g750l_new_pfl.fits']
for file in mfiles:
shutil.copy(m, outdir + file)
fh = PF.open(outdir + file, mode='update')
hdr0 = fh[0].header
hdr0['FILENAME'] = file
hdr0['OPT_ELEM'] = file[:file.find('_')].upper()
fh.close()
for file in lfiles:
shutil.copy(l, outdir + file)
fh = PF.open(outdir + file, mode='update')
hdr0 = fh[0].header
hdr0['FILENAME'] = file
hdr0['OPT_ELEM'] = file[:file.find('_')].upper()
fh.close()
def process_args(just_print_help = False):
'''
Processes and parses the command line arguments.
Will also print help and the version of the script if requested.
'''
from optparse import OptionParser
usage = 'usage: %prog [options]'
desc = 'This script can be used to generate STIS CCD spectroscopic pixel-to-pixel flat field.'
parser = OptionParser(usage = usage, version='%prog ' + __version__, description = desc)
parser.add_option('-o', '--output', dest='output',
help='Output directory. If not given, will use ./obs/',
metavar='string')
parser.add_option('-i', '--input', dest='input',
help='Input directory. If not given, will use ./out/',
metavar='string')
parser.add_option('-f', '--filelists', action='store_true', dest='filelist',
help='Will list each suitable crj file in a file list.')
parser.add_option('-c', '--crreject', action='store_true', dest='crreject',
help='Will run CalSTIS for all raw files.')
parser.add_option('-b', '--combine', action='store_true', dest='combine',
help='Will combine suitable images using ocrreject.')
parser.add_option('-5', '--50ccd', action='store_true', dest='ccd',
help='Will generate flat field images from 50CCD observations.')
parser.add_option('-s', '--slit', action='store_true', dest='slit',
help='Will generate flat field images from 52X2 observations.')
parser.add_option('-g', '--generate', action='store_true', dest='generate',
help='Will generate the final flat field images.')
parser.add_option('-m', '--make', action='store_true', dest='copy',
help='Will copy generated files to a new directory.')
parser.add_option('--stats', action='store_true', dest='stats',
help='Calculates some statistics from coadd_*fits files.')
if just_print_help:
parser.print_help()
else:
return parser.parse_args()
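#Example invocation (hypothetical script name and directory layout; the option
#flags are the ones defined above):
#  python STISCCDSpectroscopyFlat.py -i ./obs/ -o ./out/ -f -c -b -5 -s -g -m
#Running the script with no options performs every step, as handled in the
#__main__ block below.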
def checkZeroArguments(opts):
'''
Checks whether any command line arguments were given.
@param opts: option parser instance.
@return: True if at least one option was set, False otherwise
'''
for x in opts.__dict__:
if opts.__dict__[x] is not None:
return True
return False
if __name__ == '__main__':
'''
The main program starts here.
'''
#HARDCODED values:
slitpos = [-7300, -3640, 0, 3640, 7300] #slit wheel positions
#option parser
opts, args = process_args()
#process zero arguments
if checkZeroArguments(opts) == False:
print 'Will do all'
opts.filelist = True
opts.crreject = True
opts.combine = True
opts.ccd = True
opts.slit = True
opts.generate = True
opts.copy = True
if opts.output is None:
output = './out/'
else:
output = opts.output
if opts.input is None:
input = './obs/'
else:
input = opts.input
#test that folders exists
if os.path.isdir(input) == False: sys.exit('No valid input directory, please specify one!')
if os.path.isdir(output) == False: os.mkdir(output)
#objects
F = Findfiles(input, output)
P = PrepareFiles(input, output)
M = MakeFlat(input, output)
#find all raws that match the obs mode
allRaws = G.glob(input + '*_raw.fits')
raws = F.obsMode(allRaws)
if opts.crreject :
#modify headers and run calstis
print '\n\nModifying header keywords and running CalSTIS...'
P.changeKeywords(raws)
M.crreject(raws)
if opts.filelist:
print '\n\nGenerating file lists...'
#find all crj files
org = os.getcwd()
os.chdir(input)
allCrjs = G.glob('*_crj.fits')
os.chdir(org)
#50ccd
cr = F.apertureFilter(allCrjs, aperture = '50CCD', filter = 'Clear')
gain1 = F.gain(cr, value = 1)
gain4 = F.gain(cr, value = 4)
if len(gain1) == 0 or len(gain4) == 0:
print 'Did not find suitable 50CCD Clear files for one of the gain settings...'
else:
F.writeToASCIIFile(gain1, 'g430m_50ccd_gain1_crj.txt')
F.writeToASCIIFile(gain4, 'g430m_50ccd_gain4_crj.txt')
#52x2
nominal = 3242355
a52x2 = F.apertureFilter(allCrjs, aperture = '52X2', filter = 'Clear')
poss = F.slitwheelPos(a52x2, slitpos, nominal, tolerance = 2)
for val in poss:
if val < 0:
F.writeToASCIIFile(poss[val], 'g430m_52x2m%i_crj.txt' % -val)
elif val == 0:
F.writeToASCIIFile(poss[val], 'g430m_52x2_crj.txt')
else:
F.writeToASCIIFile(poss[val], 'g430m_52x2p%i_crj.txt' % val)
if opts.combine:
#combine similar images
org = os.getcwd()
os.chdir(input)
txts = G.glob('*.txt')
print '\n\nCombining individual images...'
for a in txts:
M.combine('@'+a, a[:-3] + 'fits', crsigmas = '20')
shutil.move(a[:-3] + 'fits', '../' + output)
os.chdir(org)
if opts.ccd:
#Make 50CCD flats
print '\n\nCreating 50CCD flats'
ccdFiles = ['g430m_50ccd_gain1_crj.fits', 'g430m_50ccd_gain4_crj.fits']
#M.make50CDD(ccdFiles, colnodes = 25, rownodes = 13)
M.make50CDD(ccdFiles, colnodes = 20, rownodes = 13) #colnodes = 20 seems to work better than 25
if opts.slit:
#Make 52x2 flats
print '\n\nCreating 52x2 flats'
Sfiles_tmp = ['g430m_52x2m7300','g430m_52x2m3640','g430m_52x2','g430m_52x2p3640','g430m_52x2p7300']
Sfiles = [file + '_crj.fits' for file in Sfiles_tmp]
M.make52X2(Sfiles, slitpos, colnodes = 13, rownodes = 13)
if opts.generate:
#Combine to a final flat
print '\n\nCombining flats'
toCombine = ['ppG430M_50CCD_gain4_flat.fits',
'ppG430M_50CCD_gain1_flat.fits',
'pG430M_52x2_gain4m7300_flat.fits',
'pG430M_52x2_gain4m3640_flat.fits',
'pG430M_52x2_gain4p0000_flat.fits',
'pG430M_52x2_gain4p3640_flat.fits',
'pG430M_52x2_gain4p7300_flat.fits']
headerl = {'USEAFTER' : 'May 12 2009 00:00:00',
'DESCRIP' : 'REVISED ON-ORBIT STIS SPECTROSCOPIC CCD P-FLAT FOR L-MODES',
'PEDIGREE': 'INFLIGHT 16/08/2009 15/12/2009'}
headerm = {'USEAFTER' : 'May 12 2009 00:00:00',
'PEDIGREE': 'INFLIGHT 16/08/2009 15/12/2009',
'DESCRIP' : 'REVISED ON-ORBIT STIS SPECTROSCOPIC CCD P-FLAT FOR M-MODES'}
toCombine = [output + line for line in toCombine]
M.CombineFinalFlatS(toCombine, headerl, headerm, raws)
if opts.copy:
print '\n\nMaking copies...'
M.MakeCopies()
if opts.stats:
M._doStats(PF.open('./out/coadd_comb_reject_l.fits')[1].data, 'L')
M._doStats(PF.open('./out/coadd_comb_reject_m.fits')[1].data, 'M')
print '\n\nScript Ends!'
|
bsd-2-clause
|
samuel1208/scikit-learn
|
sklearn/linear_model/tests/test_base.py
|
120
|
10082
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
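# Quick reference for the helpers under test (my paraphrase, not from the
# library docs): center_data / sparse_center_data return
# (Xt, yt, X_mean, y_mean, X_std); with fit_intercept=True the means are
# subtracted (only from y for sparse X) and, if normalize=True, the columns of
# X are also divided by X_std. With fit_intercept=False nothing is changed.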
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
|
bsd-3-clause
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/matplotlib/backends/backend_gtk3cairo.py
|
8
|
2347
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from . import backend_gtk3
from . import backend_cairo
from .backend_cairo import cairo, HAS_CAIRO_CFFI
from matplotlib.figure import Figure
class RendererGTK3Cairo(backend_cairo.RendererCairo):
def set_context(self, ctx):
if HAS_CAIRO_CFFI:
ctx = cairo.Context._from_pointer(
cairo.ffi.cast(
'cairo_t **',
id(ctx) + object.__basicsize__)[0],
incref=True)
self.gc.ctx = ctx
class FigureCanvasGTK3Cairo(backend_gtk3.FigureCanvasGTK3,
backend_cairo.FigureCanvasCairo):
def __init__(self, figure):
backend_gtk3.FigureCanvasGTK3.__init__(self, figure)
def _renderer_init(self):
"""use cairo renderer"""
self._renderer = RendererGTK3Cairo(self.figure.dpi)
def _render_figure(self, width, height):
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def on_draw_event(self, widget, ctx):
""" GtkDrawable draw event, like expose_event in GTK 2.X
"""
# the _need_redraw flag doesn't work. it sometimes prevents
# the rendering, leaving the canvas blank
#if self._need_redraw:
self._renderer.set_context(ctx)
allocation = self.get_allocation()
x, y, w, h = allocation.x, allocation.y, allocation.width, allocation.height
self._render_figure(w, h)
#self._need_redraw = False
return False # finish event propagation?
class FigureManagerGTK3Cairo(backend_gtk3.FigureManagerGTK3):
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK3Cairo(figure)
manager = FigureManagerGTK3Cairo(canvas, num)
return manager
FigureCanvas = FigureCanvasGTK3Cairo
FigureManager = FigureManagerGTK3Cairo
show = backend_gtk3.show
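# Typical use of this backend (not part of this module, shown only for
# context): select it before importing pyplot, e.g.
#   import matplotlib
#   matplotlib.use('GTK3Cairo')
#   import matplotlib.pyplot as plt
#   plt.plot([1, 2, 3]); plt.show()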
|
mit
|
vigilv/scikit-learn
|
sklearn/neighbors/tests/test_approximate.py
|
71
|
18815
|
"""
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
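# Minimal usage sketch of the estimator under test (synthetic data; the
# parameter values are arbitrary, chosen only for illustration):
#   rng = np.random.RandomState(0)
#   X = rng.rand(20, 5)
#   lshf = LSHForest(n_estimators=10, n_candidates=50, random_state=0).fit(X)
#   dist, ind = lshf.kneighbors(X[:2], n_neighbors=3)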
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slighltly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
# size of _input_array = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
|
bsd-3-clause
|
georgyberdyshev/ascend
|
models/steam/stab.py
|
1
|
3034
|
# this is a script that computes the stability of the DAE system of equations
# by using the sparse matrix routines in scipy and plotting with matplotlib.
#
# you could get fancy and produce a root locus using this technique...
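# Rough sketch of what the script computes (my reading of the code below, with
# the Jacobian blocks exactly as written out by the integrator): the algebraic
# equations g are used to eliminate the algebraic variables z, giving the
# reduced state matrix
#   D = -(df/dx')^-1 * (df/dx + df/dz * (dg/dz)^-1 * dg/dx)
# whose eigenvalues are examined at the end; the operating point is locally
# stable if every eigenvalue has a negative real part.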
import ascpy
L = ascpy.Library()
L.load('steam/dsgsat3.a4c')
T = L.findType('dsgsat3')
M = T.getSimulation('sim',False)
M.run(T.getMethod('on_load'))
print "STEADY-STATE SOLUTION..."
M.solve(ascpy.Solver('QRSlv'),ascpy.SolverReporter())
M.run(T.getMethod('configure_dynamic'))
M.solve(ascpy.Solver('QRSlv'),ascpy.SolverReporter())
M.run(T.getMethod('free_states'))
# here is the peturbation...
print "CREATING PETURBATION..."
M.qdot_s.setRealValueWithUnits(6000,"W/m")
# IDA has its own initial conditions solver, so no need to call QRSlv here
I = ascpy.Integrator(M)
I.setEngine('IDA')
I.setParameter('linsolver','DENSE')
I.setParameter('safeeval',True)
I.setParameter('rtol',1e-4)
I.setParameter('atolvect',False)
I.setParameter('atol',1e-4)
I.setParameter('maxord',2)
I.setParameter('calcic','YA_YDP')
I.setInitialSubStep(0.001)
I.setReporter(ascpy.IntegratorReporterConsole(I))
I.setLogTimesteps(ascpy.Units("s"), 0.001, 0.002, 10)
I.analyse()
F = file('gz.mm','w')
I.writeMatrix(F,'dg/dz')
F = file('gx.mm','w')
I.writeMatrix(F,'dg/dx')
F = file('fz.mm','w')
I.writeMatrix(F,'df/dz')
F = file('fx.mm','w')
I.writeMatrix(F,'df/dx')
F = file('fxp.mm','w')
I.writeMatrix(F,"df/dx'")
#I.solve()
from scipy import io
from scipy import linalg
gz = io.mmread('gz.mm')
gx = io.mmread('gx.mm')
fz = io.mmread('fz.mm')
fx = io.mmread('fx.mm')
fxp = io.mmread('fxp.mm')
print "gz", gz.shape
print "gx", gx.shape
print "fz", fz.shape
print "fx", fx.shape
print "fxp", fxp.shape
#import pylab
# dg/dy_a
#pylab.spy2(ga.todense())
#pylab.title("${dg}/{dy_a}$")
#pylab.show()
invgz = linalg.inv(gz.todense())
#pylab.figure()
#pylab.spy(invgz)
#pylab.title("$({dg}/{dy_d})^{-1}$")
#pylab.show()
# dg/dy_d
#pylab.figure()
#pylab.spy2(gd.todense())
#pylab.title("${dg}/{dy_d}$")
#pylab.show()
# df/dyd'
#pylab.figure()
#pylab.spy2(fdp.todense())
#pylab.title("${df}/{d\dot{y}_d}$")
#pylab.show()
invfxp = linalg.inv(fxp.todense())
#pylab.spy2(invfdp)
#pylab.title("$({df}/{dy_dp})^{-1}$")
#pylab.show()
dya_dyd = invgz * gx
print "gz^-1 gx",dya_dyd.shape
#pylab.spy2(dya_dyd.todense())
#pylab.title("${dy_a}/{dy_d}$")
#pylab.show()
B = fz * invgz * gx
print "fz gz^1 gz",B.shape
#pylab.spy2(fad.todense())
#pylab.title("${df}/{dy_a} * {dy_a}/{dy_d}$")
#pylab.show()
C = fx + B
D = - invfxp * C
e,v = linalg.eig(D.todense())
#print e
print "ROOT RANGE-----------"
print "max re(e)",max(e.real)
print "min re(e)",min(e.real)
print "max im(e)",max(e.imag)
print "min in(e)",min(e.imag)
sys.stdout.flush()
#I.solve()
import pylab, sys
sys.stderr.write("about to plot...")
pylab.plot(e.real,e.imag,'rx')
pylab.xlabel('Real axis')
pylab.ylabel('Imaginary axis')
pylab.show()
sys.stderr.write("DONE\n")
I.setLogTimesteps(ascpy.Units("s"), 0.0005, 3600, 10)
I.setParameter('calcic','Y')
I.solve()
|
gpl-2.0
|
kaiserroll14/301finalproject
|
main/pandas/tseries/frequencies.py
|
9
|
36464
|
from datetime import datetime,timedelta
from pandas.compat import range, long, zip
from pandas import compat
import re
import warnings
import numpy as np
from pandas.core.algorithms import unique
from pandas.tseries.offsets import DateOffset
from pandas.util.decorators import cache_readonly
import pandas.tseries.offsets as offsets
import pandas.core.common as com
import pandas.lib as lib
import pandas.tslib as tslib
import pandas._period as period
from pandas.tslib import Timedelta
from pytz import AmbiguousTimeError
class FreqGroup(object):
FR_ANN = 1000
FR_QTR = 2000
FR_MTH = 3000
FR_WK = 4000
FR_BUS = 5000
FR_DAY = 6000
FR_HR = 7000
FR_MIN = 8000
FR_SEC = 9000
FR_MS = 10000
FR_US = 11000
FR_NS = 12000
class Resolution(object):
# defined in period.pyx
# note that these are different from freq codes
RESO_US = period.US_RESO
RESO_MS = period.MS_RESO
RESO_SEC = period.S_RESO
RESO_MIN = period.T_RESO
RESO_HR = period.H_RESO
RESO_DAY = period.D_RESO
_reso_str_map = {
RESO_US: 'microsecond',
RESO_MS: 'millisecond',
RESO_SEC: 'second',
RESO_MIN: 'minute',
RESO_HR: 'hour',
RESO_DAY: 'day'}
_str_reso_map = dict([(v, k) for k, v in compat.iteritems(_reso_str_map)])
_reso_freq_map = {
'year': 'A',
'quarter': 'Q',
'month': 'M',
'day': 'D',
'hour': 'H',
'minute': 'T',
'second': 'S',
'millisecond': 'L',
'microsecond': 'U',
'nanosecond': 'N'}
_freq_reso_map = dict([(v, k) for k, v in compat.iteritems(_reso_freq_map)])
@classmethod
def get_str(cls, reso):
"""
Return resolution str against resolution code.
Example
-------
>>> Resolution.get_str(Resolution.RESO_SEC)
'second'
"""
return cls._reso_str_map.get(reso, 'day')
@classmethod
def get_reso(cls, resostr):
"""
Return resolution code against resolution str.
Example
-------
>>> Resolution.get_reso('second')
2
>>> Resolution.get_reso('second') == Resolution.RESO_SEC
True
"""
return cls._str_reso_map.get(resostr, cls.RESO_DAY)
@classmethod
def get_freq_group(cls, resostr):
"""
Return frequency code group against resolution str.
Example
-------
>>> Resolution.get_freq_group('day')
6000
"""
return get_freq_group(cls.get_freq(resostr))
@classmethod
def get_freq(cls, resostr):
"""
Return frequency str against resolution str.
Example
-------
>>> f.Resolution.get_freq('day')
'D'
"""
return cls._reso_freq_map[resostr]
@classmethod
def get_str_from_freq(cls, freq):
"""
Return resolution str against frequency str.
Example
-------
>>> Resolution.get_str_from_freq('H')
'hour'
"""
return cls._freq_reso_map.get(freq, 'day')
@classmethod
def get_reso_from_freq(cls, freq):
"""
Return resolution code against frequency str.
Example
-------
>>> Resolution.get_reso_from_freq('H')
4
>>> Resolution.get_reso_from_freq('H') == Resolution.RESO_HR
True
"""
return cls.get_reso(cls.get_str_from_freq(freq))
def get_to_timestamp_base(base):
"""
Return frequency code group used for base of to_timestamp against
frequency code.
Example
-------
# Return day freq code against longer freq than day
>>> get_to_timestamp_base(get_freq_code('D')[0])
6000
>>> get_to_timestamp_base(get_freq_code('W')[0])
6000
>>> get_to_timestamp_base(get_freq_code('M')[0])
6000
# Return second freq code against frequencies between hour and second
>>> get_to_timestamp_base(get_freq_code('H')[0])
9000
>>> get_to_timestamp_base(get_freq_code('S')[0])
9000
"""
if base < FreqGroup.FR_BUS:
return FreqGroup.FR_DAY
if FreqGroup.FR_HR <= base <= FreqGroup.FR_SEC:
return FreqGroup.FR_SEC
return base
def get_freq_group(freq):
"""
Return frequency code group of given frequency str or offset.
Example
-------
>>> get_freq_group('W-MON')
4000
>>> get_freq_group('W-FRI')
4000
"""
if isinstance(freq, offsets.DateOffset):
freq = freq.rule_code
if isinstance(freq, compat.string_types):
base, mult = get_freq_code(freq)
freq = base
elif isinstance(freq, int):
pass
else:
raise ValueError('input must be str, offset or int')
return (freq // 1000) * 1000
def get_freq(freq):
"""
Return frequency code of given frequency str.
If input is not string, return input as it is.
Example
-------
>>> get_freq('A')
1000
>>> get_freq('3A')
1000
"""
if isinstance(freq, compat.string_types):
base, mult = get_freq_code(freq)
freq = base
return freq
def get_freq_code(freqstr):
"""
Return freq str or tuple to freq code and stride (mult)
Parameters
----------
freqstr : str or tuple
Returns
-------
return : tuple of base frequency code and stride (mult)
Example
-------
>>> get_freq_code('3D')
(6000, 3)
>>> get_freq_code('D')
(6000, 1)
>>> get_freq_code(('D', 3))
(6000, 3)
"""
if isinstance(freqstr, DateOffset):
freqstr = (freqstr.rule_code, freqstr.n)
if isinstance(freqstr, tuple):
if (com.is_integer(freqstr[0]) and
com.is_integer(freqstr[1])):
# e.g., freqstr = (2000, 1)
return freqstr
else:
# e.g., freqstr = ('T', 5)
try:
code = _period_str_to_code(freqstr[0])
stride = freqstr[1]
except:
if com.is_integer(freqstr[1]):
raise
code = _period_str_to_code(freqstr[1])
stride = freqstr[0]
return code, stride
if com.is_integer(freqstr):
return (freqstr, 1)
base, stride = _base_and_stride(freqstr)
code = _period_str_to_code(base)
return code, stride
def _get_freq_str(base, mult=1):
code = _reverse_period_code_map.get(base)
if mult == 1:
return code
return str(mult) + code
#----------------------------------------------------------------------
# Offset names ("time rules") and related functions
from pandas.tseries.offsets import (Nano, Micro, Milli, Second, Minute, Hour,
Day, BDay, CDay, Week, MonthBegin,
MonthEnd, BMonthBegin, BMonthEnd,
QuarterBegin, QuarterEnd, BQuarterBegin,
BQuarterEnd, YearBegin, YearEnd,
BYearBegin, BYearEnd, _make_offset
)
try:
cday = CDay()
except NotImplementedError:
cday = None
#: cache of previously seen offsets
_offset_map = {}
_offset_to_period_map = {
'WEEKDAY': 'D',
'EOM': 'M',
'BM': 'M',
'BQS': 'Q',
'QS': 'Q',
'BQ': 'Q',
'BA': 'A',
'AS': 'A',
'BAS': 'A',
'MS': 'M',
'D': 'D',
'C': 'C',
'B': 'B',
'T': 'T',
'S': 'S',
'L': 'L',
'U': 'U',
'N': 'N',
'H': 'H',
'Q': 'Q',
'A': 'A',
'W': 'W',
'M': 'M'
}
need_suffix = ['QS', 'BQ', 'BQS', 'AS', 'BA', 'BAS']
for __prefix in need_suffix:
for _m in tslib._MONTHS:
_offset_to_period_map['%s-%s' % (__prefix, _m)] = \
_offset_to_period_map[__prefix]
for __prefix in ['A', 'Q']:
for _m in tslib._MONTHS:
_alias = '%s-%s' % (__prefix, _m)
_offset_to_period_map[_alias] = _alias
_days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for _d in _days:
_offset_to_period_map['W-%s' % _d] = 'W-%s' % _d
def get_period_alias(offset_str):
""" alias to closest period strings BQ->Q etc"""
return _offset_to_period_map.get(offset_str, None)
_rule_aliases = {
# Legacy rules that will continue to map to their original values
# essentially for the rest of time
'WEEKDAY': 'B',
'EOM': 'BM',
'W@MON': 'W-MON',
'W@TUE': 'W-TUE',
'W@WED': 'W-WED',
'W@THU': 'W-THU',
'W@FRI': 'W-FRI',
'W@SAT': 'W-SAT',
'W@SUN': 'W-SUN',
'Q@JAN': 'BQ-JAN',
'Q@FEB': 'BQ-FEB',
'Q@MAR': 'BQ-MAR',
'A@JAN': 'BA-JAN',
'A@FEB': 'BA-FEB',
'A@MAR': 'BA-MAR',
'A@APR': 'BA-APR',
'A@MAY': 'BA-MAY',
'A@JUN': 'BA-JUN',
'A@JUL': 'BA-JUL',
'A@AUG': 'BA-AUG',
'A@SEP': 'BA-SEP',
'A@OCT': 'BA-OCT',
'A@NOV': 'BA-NOV',
'A@DEC': 'BA-DEC',
}
_lite_rule_alias = {
'W': 'W-SUN',
'Q': 'Q-DEC',
'A': 'A-DEC', # YearEnd(month=12),
'AS': 'AS-JAN', # YearBegin(month=1),
'BA': 'BA-DEC', # BYearEnd(month=12),
'BAS': 'BAS-JAN', # BYearBegin(month=1),
'Min': 'T',
'min': 'T',
'ms': 'L',
'us': 'U'
}
#TODO: Can this be killed?
for _i, _weekday in enumerate(['MON', 'TUE', 'WED', 'THU', 'FRI']):
for _iweek in range(4):
_name = 'WOM-%d%s' % (_iweek + 1, _weekday)
_rule_aliases[_name.replace('-', '@')] = _name
# Note that _rule_aliases is not 1:1 (d[BA]==d[A@DEC]), and so traversal
# order matters when constructing an inverse. we pick one. #2331
# Used in get_legacy_offset_name
_legacy_reverse_map = dict((v, k) for k, v in
reversed(sorted(compat.iteritems(_rule_aliases))))
_name_to_offset_map = {'days': Day(1),
'hours': Hour(1),
'minutes': Minute(1),
'seconds': Second(1),
'milliseconds': Milli(1),
'microseconds': Micro(1),
'nanoseconds': Nano(1)}
def to_offset(freqstr):
"""
Return DateOffset object from string representation or
Timedelta object
Examples
--------
>>> to_offset('5Min')
Minute(5)
"""
if freqstr is None:
return None
if isinstance(freqstr, DateOffset):
return freqstr
if isinstance(freqstr, tuple):
name = freqstr[0]
stride = freqstr[1]
if isinstance(stride, compat.string_types):
name, stride = stride, name
name, _ = _base_and_stride(name)
delta = get_offset(name) * stride
elif isinstance(freqstr, timedelta):
delta = None
freqstr = Timedelta(freqstr)
try:
for name in freqstr.components._fields:
offset = _name_to_offset_map[name]
stride = getattr(freqstr.components, name)
if stride != 0:
offset = stride * offset
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError("Could not evaluate %s" % freqstr)
else:
delta = None
stride_sign = None
try:
for stride, name, _ in opattern.findall(freqstr):
offset = get_offset(name)
if stride_sign is None:
stride_sign = -1 if stride.startswith('-') else 1
if not stride:
stride = 1
stride = int(stride)
offset = offset * int(np.fabs(stride) * stride_sign)
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError("Could not evaluate %s" % freqstr)
if delta is None:
raise ValueError('Unable to understand %s as a frequency' % freqstr)
return delta
# hack to handle WOM-1MON
opattern = re.compile(r'([\-]?\d*)\s*([A-Za-z]+([\-@][\dA-Za-z\-]+)?)')
def _base_and_stride(freqstr):
"""
Return base freq and stride info from string representation
Examples
--------
    _base_and_stride('5Min') -> ('Min', 5)
"""
groups = opattern.match(freqstr)
if not groups:
raise ValueError("Could not evaluate %s" % freqstr)
stride = groups.group(1)
if len(stride):
stride = int(stride)
else:
stride = 1
base = groups.group(2)
return (base, stride)
def get_base_alias(freqstr):
"""
Returns the base frequency alias, e.g., '5D' -> 'D'
"""
return _base_and_stride(freqstr)[0]
_dont_uppercase = set(('MS', 'ms'))
_LEGACY_FREQ_WARNING = 'Freq "{0}" is deprecated, use "{1}" as alternative.'
def get_offset(name):
"""
Return DateOffset object associated with rule name
Examples
--------
get_offset('EOM') --> BMonthEnd(1)
"""
if name not in _dont_uppercase:
name = name.upper()
if name in _rule_aliases:
new = _rule_aliases[name]
warnings.warn(_LEGACY_FREQ_WARNING.format(name, new),
FutureWarning, stacklevel=2)
name = new
elif name.lower() in _rule_aliases:
new = _rule_aliases[name.lower()]
warnings.warn(_LEGACY_FREQ_WARNING.format(name, new),
FutureWarning, stacklevel=2)
name = new
name = _lite_rule_alias.get(name, name)
name = _lite_rule_alias.get(name.lower(), name)
else:
if name in _rule_aliases:
new = _rule_aliases[name]
warnings.warn(_LEGACY_FREQ_WARNING.format(name, new),
FutureWarning, stacklevel=2)
name = new
name = _lite_rule_alias.get(name, name)
if name not in _offset_map:
try:
# generate and cache offset
offset = _make_offset(name)
except (ValueError, TypeError, KeyError):
# bad prefix or suffix
raise ValueError('Bad rule name requested: %s.' % name)
_offset_map[name] = offset
return _offset_map[name]
getOffset = get_offset
def get_offset_name(offset):
"""
Return rule name associated with a DateOffset object
Examples
--------
get_offset_name(BMonthEnd(1)) --> 'EOM'
"""
if offset is None:
raise ValueError("Offset can't be none!")
# Hack because this is what it did before...
if isinstance(offset, BDay):
if offset.n != 1:
raise ValueError('Bad rule given: %s.' % 'BusinessDays')
else:
return offset.rule_code
try:
return offset.freqstr
except AttributeError:
# Bad offset, give useful error.
raise ValueError('Bad rule given: %s.' % offset)
def get_legacy_offset_name(offset):
"""
Return the pre pandas 0.8.0 name for the date offset
"""
# This only used in test_timeseries_legacy.py
name = offset.name
return _legacy_reverse_map.get(name, name)
def get_standard_freq(freq):
"""
Return the standardized frequency string
"""
if freq is None:
return None
if isinstance(freq, DateOffset):
return freq.rule_code
code, stride = get_freq_code(freq)
return _get_freq_str(code, stride)
#----------------------------------------------------------------------
# Period codes
# period frequency constants corresponding to scikits timeseries
# originals
_period_code_map = {
# Annual freqs with various fiscal year ends.
# eg, 2005 for A-FEB runs Mar 1, 2004 to Feb 28, 2005
"A-DEC": 1000, # Annual - December year end
"A-JAN": 1001, # Annual - January year end
"A-FEB": 1002, # Annual - February year end
"A-MAR": 1003, # Annual - March year end
"A-APR": 1004, # Annual - April year end
"A-MAY": 1005, # Annual - May year end
"A-JUN": 1006, # Annual - June year end
"A-JUL": 1007, # Annual - July year end
"A-AUG": 1008, # Annual - August year end
"A-SEP": 1009, # Annual - September year end
"A-OCT": 1010, # Annual - October year end
"A-NOV": 1011, # Annual - November year end
# Quarterly frequencies with various fiscal year ends.
# eg, Q42005 for Q-OCT runs Aug 1, 2005 to Oct 31, 2005
"Q-DEC": 2000, # Quarterly - December year end
"Q-JAN": 2001, # Quarterly - January year end
"Q-FEB": 2002, # Quarterly - February year end
"Q-MAR": 2003, # Quarterly - March year end
"Q-APR": 2004, # Quarterly - April year end
"Q-MAY": 2005, # Quarterly - May year end
"Q-JUN": 2006, # Quarterly - June year end
"Q-JUL": 2007, # Quarterly - July year end
"Q-AUG": 2008, # Quarterly - August year end
"Q-SEP": 2009, # Quarterly - September year end
"Q-OCT": 2010, # Quarterly - October year end
"Q-NOV": 2011, # Quarterly - November year end
"M": 3000, # Monthly
"W-SUN": 4000, # Weekly - Sunday end of week
"W-MON": 4001, # Weekly - Monday end of week
"W-TUE": 4002, # Weekly - Tuesday end of week
"W-WED": 4003, # Weekly - Wednesday end of week
"W-THU": 4004, # Weekly - Thursday end of week
"W-FRI": 4005, # Weekly - Friday end of week
"W-SAT": 4006, # Weekly - Saturday end of week
"B": 5000, # Business days
"D": 6000, # Daily
"H": 7000, # Hourly
"T": 8000, # Minutely
"S": 9000, # Secondly
"L": 10000, # Millisecondly
"U": 11000, # Microsecondly
"N": 12000, # Nanosecondly
}
_reverse_period_code_map = {}
for _k, _v in compat.iteritems(_period_code_map):
_reverse_period_code_map[_v] = _k
# Additional aliases
_period_code_map.update({
"Q": 2000, # Quarterly - December year end (default quarterly)
"A": 1000, # Annual
"W": 4000, # Weekly
})
def _period_alias_dictionary():
"""
Build freq alias dictionary to support freqs from original c_dates.c file
of the scikits.timeseries library.
"""
alias_dict = {}
M_aliases = ["M", "MTH", "MONTH", "MONTHLY"]
B_aliases = ["B", "BUS", "BUSINESS", "BUSINESSLY", 'WEEKDAY']
D_aliases = ["D", "DAY", "DLY", "DAILY"]
H_aliases = ["H", "HR", "HOUR", "HRLY", "HOURLY"]
T_aliases = ["T", "MIN", "MINUTE", "MINUTELY"]
S_aliases = ["S", "SEC", "SECOND", "SECONDLY"]
L_aliases = ["L", "ms", "MILLISECOND", "MILLISECONDLY"]
U_aliases = ["U", "US", "MICROSECOND", "MICROSECONDLY"]
N_aliases = ["N", "NS", "NANOSECOND", "NANOSECONDLY"]
for k in M_aliases:
alias_dict[k] = 'M'
for k in B_aliases:
alias_dict[k] = 'B'
for k in D_aliases:
alias_dict[k] = 'D'
for k in H_aliases:
alias_dict[k] = 'H'
for k in T_aliases:
alias_dict[k] = 'Min'
for k in S_aliases:
alias_dict[k] = 'S'
for k in L_aliases:
alias_dict[k] = 'L'
for k in U_aliases:
alias_dict[k] = 'U'
for k in N_aliases:
alias_dict[k] = 'N'
A_prefixes = ["A", "Y", "ANN", "ANNUAL", "ANNUALLY", "YR", "YEAR",
"YEARLY"]
Q_prefixes = ["Q", "QTR", "QUARTER", "QUARTERLY", "Q-E",
"QTR-E", "QUARTER-E", "QUARTERLY-E"]
month_names = [
["DEC", "DECEMBER"],
["JAN", "JANUARY"],
["FEB", "FEBRUARY"],
["MAR", "MARCH"],
["APR", "APRIL"],
["MAY", "MAY"],
["JUN", "JUNE"],
["JUL", "JULY"],
["AUG", "AUGUST"],
["SEP", "SEPTEMBER"],
["OCT", "OCTOBER"],
["NOV", "NOVEMBER"]]
seps = ["@", "-"]
for k in A_prefixes:
alias_dict[k] = 'A'
for m_tup in month_names:
for sep in seps:
m1, m2 = m_tup
alias_dict[k + sep + m1] = 'A-' + m1
alias_dict[k + sep + m2] = 'A-' + m1
for k in Q_prefixes:
alias_dict[k] = 'Q'
for m_tup in month_names:
for sep in seps:
m1, m2 = m_tup
alias_dict[k + sep + m1] = 'Q-' + m1
alias_dict[k + sep + m2] = 'Q-' + m1
W_prefixes = ["W", "WK", "WEEK", "WEEKLY"]
day_names = [
["SUN", "SUNDAY"],
["MON", "MONDAY"],
["TUE", "TUESDAY"],
["WED", "WEDNESDAY"],
["THU", "THURSDAY"],
["FRI", "FRIDAY"],
["SAT", "SATURDAY"]]
for k in W_prefixes:
alias_dict[k] = 'W'
for d_tup in day_names:
for sep in ["@", "-"]:
d1, d2 = d_tup
alias_dict[k + sep + d1] = 'W-' + d1
alias_dict[k + sep + d2] = 'W-' + d1
return alias_dict
_period_alias_dict = _period_alias_dictionary()
def _period_str_to_code(freqstr):
# hack
if freqstr in _rule_aliases:
new = _rule_aliases[freqstr]
warnings.warn(_LEGACY_FREQ_WARNING.format(freqstr, new),
FutureWarning, stacklevel=3)
freqstr = new
freqstr = _lite_rule_alias.get(freqstr, freqstr)
if freqstr not in _dont_uppercase:
lower = freqstr.lower()
if lower in _rule_aliases:
new = _rule_aliases[lower]
warnings.warn(_LEGACY_FREQ_WARNING.format(lower, new),
FutureWarning, stacklevel=3)
freqstr = new
freqstr = _lite_rule_alias.get(lower, freqstr)
try:
if freqstr not in _dont_uppercase:
freqstr = freqstr.upper()
return _period_code_map[freqstr]
except KeyError:
try:
alias = _period_alias_dict[freqstr]
warnings.warn(_LEGACY_FREQ_WARNING.format(freqstr, alias),
FutureWarning, stacklevel=3)
except KeyError:
raise ValueError("Unknown freqstr: %s" % freqstr)
return _period_code_map[alias]
def infer_freq(index, warn=True):
"""
Infer the most likely frequency given the input index. If the frequency is
uncertain, a warning will be printed.
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
if passed a Series will use the values of the series (NOT THE INDEX)
warn : boolean, default True
Returns
-------
freq : string or None
None if no discernible frequency
TypeError if the index is not datetime-like
    ValueError if there are fewer than three values.
"""
import pandas as pd
if isinstance(index, com.ABCSeries):
values = index._values
if not (com.is_datetime64_dtype(values) or com.is_timedelta64_dtype(values) or values.dtype == object):
raise TypeError("cannot infer freq from a non-convertible dtype on a Series of {0}".format(index.dtype))
index = values
if com.is_period_arraylike(index):
raise TypeError("PeriodIndex given. Check the `freq` attribute "
"instead of using infer_freq.")
elif isinstance(index, pd.TimedeltaIndex):
inferer = _TimedeltaFrequencyInferer(index, warn=warn)
return inferer.get_freq()
if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
if isinstance(index, (pd.Int64Index, pd.Float64Index)):
raise TypeError("cannot infer freq from a non-convertible index type {0}".format(type(index)))
index = index.values
if not isinstance(index, pd.DatetimeIndex):
try:
index = pd.DatetimeIndex(index)
except AmbiguousTimeError:
index = pd.DatetimeIndex(index.asi8)
inferer = _FrequencyInferer(index, warn=warn)
return inferer.get_freq()
_ONE_MICRO = long(1000)
_ONE_MILLI = _ONE_MICRO * 1000
_ONE_SECOND = _ONE_MILLI * 1000
_ONE_MINUTE = 60 * _ONE_SECOND
_ONE_HOUR = 60 * _ONE_MINUTE
_ONE_DAY = 24 * _ONE_HOUR
class _FrequencyInferer(object):
"""
Not sure if I can avoid the state machine here
"""
def __init__(self, index, warn=True):
self.index = index
self.values = np.asarray(index).view('i8')
        # This moves the values, which are implicitly in UTC, to the index's
        # timezone so they are in local time
if hasattr(index,'tz'):
if index.tz is not None:
self.values = tslib.tz_convert(self.values, 'UTC', index.tz)
self.warn = warn
if len(index) < 3:
raise ValueError('Need at least 3 dates to infer frequency')
self.is_monotonic = (self.index.is_monotonic_increasing or
self.index.is_monotonic_decreasing)
@cache_readonly
def deltas(self):
return tslib.unique_deltas(self.values)
@cache_readonly
def deltas_asi8(self):
return tslib.unique_deltas(self.index.asi8)
@cache_readonly
def is_unique(self):
return len(self.deltas) == 1
@cache_readonly
def is_unique_asi8(self):
return len(self.deltas_asi8) == 1
def get_freq(self):
if not self.is_monotonic or not self.index.is_unique:
return None
delta = self.deltas[0]
if _is_multiple(delta, _ONE_DAY):
return self._infer_daily_rule()
else:
# Business hourly, maybe. 17: one day / 65: one weekend
if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):
return 'BH'
# Possibly intraday frequency. Here we use the
# original .asi8 values as the modified values
# will not work around DST transitions. See #8772
elif not self.is_unique_asi8:
return None
delta = self.deltas_asi8[0]
if _is_multiple(delta, _ONE_HOUR):
# Hours
return _maybe_add_count('H', delta / _ONE_HOUR)
elif _is_multiple(delta, _ONE_MINUTE):
# Minutes
return _maybe_add_count('T', delta / _ONE_MINUTE)
elif _is_multiple(delta, _ONE_SECOND):
# Seconds
return _maybe_add_count('S', delta / _ONE_SECOND)
elif _is_multiple(delta, _ONE_MILLI):
# Milliseconds
return _maybe_add_count('L', delta / _ONE_MILLI)
elif _is_multiple(delta, _ONE_MICRO):
# Microseconds
return _maybe_add_count('U', delta / _ONE_MICRO)
else:
# Nanoseconds
return _maybe_add_count('N', delta)
@cache_readonly
def day_deltas(self):
return [x / _ONE_DAY for x in self.deltas]
@cache_readonly
def hour_deltas(self):
return [x / _ONE_HOUR for x in self.deltas]
@cache_readonly
def fields(self):
return tslib.build_field_sarray(self.values)
@cache_readonly
def rep_stamp(self):
return lib.Timestamp(self.values[0])
def month_position_check(self):
# TODO: cythonize this, very slow
calendar_end = True
business_end = True
calendar_start = True
business_start = True
years = self.fields['Y']
months = self.fields['M']
days = self.fields['D']
weekdays = self.index.dayofweek
from calendar import monthrange
for y, m, d, wd in zip(years, months, days, weekdays):
if calendar_start:
calendar_start &= d == 1
if business_start:
business_start &= d == 1 or (d <= 3 and wd == 0)
if calendar_end or business_end:
_, daysinmonth = monthrange(y, m)
cal = d == daysinmonth
if calendar_end:
calendar_end &= cal
if business_end:
business_end &= cal or (daysinmonth - d < 3 and wd == 4)
elif not calendar_start and not business_start:
break
if calendar_end:
return 'ce'
elif business_end:
return 'be'
elif calendar_start:
return 'cs'
elif business_start:
return 'bs'
else:
return None
@cache_readonly
def mdiffs(self):
nmonths = self.fields['Y'] * 12 + self.fields['M']
return tslib.unique_deltas(nmonths.astype('i8'))
@cache_readonly
def ydiffs(self):
return tslib.unique_deltas(self.fields['Y'].astype('i8'))
def _infer_daily_rule(self):
annual_rule = self._get_annual_rule()
if annual_rule:
nyears = self.ydiffs[0]
month = _month_aliases[self.rep_stamp.month]
return _maybe_add_count('%s-%s' % (annual_rule, month), nyears)
quarterly_rule = self._get_quarterly_rule()
if quarterly_rule:
nquarters = self.mdiffs[0] / 3
mod_dict = {0: 12, 2: 11, 1: 10}
month = _month_aliases[mod_dict[self.rep_stamp.month % 3]]
return _maybe_add_count('%s-%s' % (quarterly_rule, month),
nquarters)
monthly_rule = self._get_monthly_rule()
if monthly_rule:
return _maybe_add_count(monthly_rule, self.mdiffs[0])
if self.is_unique:
days = self.deltas[0] / _ONE_DAY
if days % 7 == 0:
# Weekly
alias = _weekday_rule_aliases[self.rep_stamp.weekday()]
return _maybe_add_count('W-%s' % alias, days / 7)
else:
return _maybe_add_count('D', days)
# Business daily. Maybe
if self.day_deltas == [1, 3]:
return 'B'
wom_rule = self._get_wom_rule()
if wom_rule:
return wom_rule
def _get_annual_rule(self):
if len(self.ydiffs) > 1:
return None
if len(algos.unique(self.fields['M'])) > 1:
return None
pos_check = self.month_position_check()
return {'cs': 'AS', 'bs': 'BAS',
'ce': 'A', 'be': 'BA'}.get(pos_check)
def _get_quarterly_rule(self):
if len(self.mdiffs) > 1:
return None
if not self.mdiffs[0] % 3 == 0:
return None
pos_check = self.month_position_check()
return {'cs': 'QS', 'bs': 'BQS',
'ce': 'Q', 'be': 'BQ'}.get(pos_check)
def _get_monthly_rule(self):
if len(self.mdiffs) > 1:
return None
pos_check = self.month_position_check()
return {'cs': 'MS', 'bs': 'BMS',
'ce': 'M', 'be': 'BM'}.get(pos_check)
def _get_wom_rule(self):
# wdiffs = unique(np.diff(self.index.week))
#We also need -47, -49, -48 to catch index spanning year boundary
# if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all():
# return None
weekdays = unique(self.index.weekday)
if len(weekdays) > 1:
return None
week_of_months = unique((self.index.day - 1) // 7)
# Only attempt to infer up to WOM-4. See #9425
week_of_months = week_of_months[week_of_months < 4]
if len(week_of_months) == 0 or len(week_of_months) > 1:
return None
# get which week
week = week_of_months[0] + 1
wd = _weekday_rule_aliases[weekdays[0]]
return 'WOM-%d%s' % (week, wd)
import pandas.core.algorithms as algos
class _TimedeltaFrequencyInferer(_FrequencyInferer):
def _infer_daily_rule(self):
if self.is_unique:
days = self.deltas[0] / _ONE_DAY
if days % 7 == 0:
# Weekly
alias = _weekday_rule_aliases[self.rep_stamp.weekday()]
return _maybe_add_count('W-%s' % alias, days / 7)
else:
return _maybe_add_count('D', days)
def _maybe_add_count(base, count):
if count != 1:
return '%d%s' % (count, base)
else:
return base
def is_subperiod(source, target):
"""
Returns True if downsampling is possible between source and target
frequencies
Parameters
----------
source : string
Frequency converting from
target : string
Frequency converting to
Returns
-------
is_subperiod : boolean
"""
if isinstance(source, offsets.DateOffset):
source = source.rule_code
if isinstance(target, offsets.DateOffset):
target = target.rule_code
target = target.upper()
source = source.upper()
if _is_annual(target):
if _is_quarterly(source):
return _quarter_months_conform(_get_rule_month(source),
_get_rule_month(target))
return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_quarterly(target):
return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_monthly(target):
return source in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_weekly(target):
return source in [target, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'B':
return source in ['B', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'C':
return source in ['C', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'D':
return source in ['D', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'H':
return source in ['H', 'T', 'S', 'L', 'U', 'N']
elif target == 'T':
return source in ['T', 'S', 'L', 'U', 'N']
elif target == 'S':
return source in ['S', 'L', 'U', 'N']
elif target == 'L':
return source in ['L', 'U', 'N']
elif target == 'U':
return source in ['U', 'N']
elif target == 'N':
return source in ['N']
def is_superperiod(source, target):
"""
Returns True if upsampling is possible between source and target
frequencies
Parameters
----------
source : string
Frequency converting from
target : string
Frequency converting to
Returns
-------
is_superperiod : boolean
"""
if isinstance(source, offsets.DateOffset):
source = source.rule_code
if isinstance(target, offsets.DateOffset):
target = target.rule_code
target = target.upper()
source = source.upper()
if _is_annual(source):
if _is_annual(target):
return _get_rule_month(source) == _get_rule_month(target)
if _is_quarterly(target):
smonth = _get_rule_month(source)
tmonth = _get_rule_month(target)
return _quarter_months_conform(smonth, tmonth)
return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_quarterly(source):
return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_monthly(source):
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_weekly(source):
return target in [source, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'B':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'C':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'D':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'H':
return target in ['H', 'T', 'S', 'L', 'U', 'N']
elif source == 'T':
return target in ['T', 'S', 'L', 'U', 'N']
elif source == 'S':
return target in ['S', 'L', 'U', 'N']
elif source == 'L':
return target in ['L', 'U', 'N']
elif source == 'U':
return target in ['U', 'N']
elif source == 'N':
return target in ['N']
_get_rule_month = tslib._get_rule_month
def _is_annual(rule):
rule = rule.upper()
return rule == 'A' or rule.startswith('A-')
def _quarter_months_conform(source, target):
snum = _month_numbers[source]
tnum = _month_numbers[target]
return snum % 3 == tnum % 3
def _is_quarterly(rule):
rule = rule.upper()
return rule == 'Q' or rule.startswith('Q-') or rule.startswith('BQ')
def _is_monthly(rule):
rule = rule.upper()
return rule == 'M' or rule == 'BM'
def _is_weekly(rule):
rule = rule.upper()
return rule == 'W' or rule.startswith('W-')
DAYS = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
MONTHS = tslib._MONTHS
_month_numbers = tslib._MONTH_NUMBERS
_month_aliases = tslib._MONTH_ALIASES
_weekday_rule_aliases = dict((k, v) for k, v in enumerate(DAYS))
def _is_multiple(us, mult):
return us % mult == 0
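# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal smoke test of the pure helpers defined above; the expected values
# follow the docstrings and _period_code_map earlier in this file.
def _example_frequency_helpers():
    assert get_freq_code('3D') == (6000, 3)        # daily base code, stride 3
    assert get_freq_group('W-FRI') == 4000         # weekly frequency group
    assert is_subperiod('H', 'D')                  # hourly data can be downsampled to daily
    assert is_superperiod('A-DEC', 'Q-DEC')        # annual data can be upsampled to quarterly
    assert _maybe_add_count('T', 5) == '5T'        # stride prefixes the base rule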
|
gpl-3.0
|
soneoed/naowalkoptimiser
|
server/Localisation.theta.py
|
2
|
12012
|
""" An SIR Particle Filter based localisation system for tracking a robot with ambiguous bearing
Jason Kulk
"""
from NAO import NAO
import numpy, time
class Localisation:
THETA = 0
THETADOT = 1
STATE_LENGTH = 2
NUM_PARTICLES = 1000
def __init__(self):
""" """
self.reset = True
self.time = time.time()
self.previoustime = self.time
self.control = numpy.zeros(3) # the current control
self.previouscontrol = self.control # the previous control
self.measurement = numpy.zeros(Localisation.STATE_LENGTH) # the current measurement of the state
self.previousmeasurement = self.measurement # the previous measurement of the state
self.States = numpy.zeros((Localisation.NUM_PARTICLES, Localisation.STATE_LENGTH)) # the (states) particles
self.PreviousStates = self.States # the previous state of each particle (used for derivative calculations)
self.Weights = numpy.zeros(Localisation.NUM_PARTICLES) # the weights of each particle
self.State = self.States[0] # the estimate of the state
# Variables for the control model:
self.accelerationduration = [2.0, 2.0, 1.0] # the duration an acceleration is applied (s)
self.accelerationmagnitudes = [7.5, 5.0, 0.7] # the magnitude of the accelerations [forward, sideward, turn] (cm/s/s, rad/s)
self.accelerations = numpy.zeros((Localisation.NUM_PARTICLES, 3)) # the current acceleration (cm/s/s) for each particle
self.accelendtimes = numpy.zeros((Localisation.NUM_PARTICLES, 3)) # the times the accelerations will be set to zero given no change in control (s)
def update(self, control, nao):
""" """
self.time = time.time()
self.control = control
self.measurement = self.__naoToState(nao)
if self.reset:
self.__initParticles()
self.reset = False
else:
self.predict()
self.updateWeights()
self.resample()
self.estimateState()
self.previoustime = self.time
self.PreviousStates = self.States
def predict(self):
""" Updates each of the particles based on system and control model """
self.modelSystem()
self.modelControl()
def updateWeights(self):
""" """
# calculate the weights based on a measurement model
self.Weights = self.__gauss(self.States[:,Localisation.THETA] - self.measurement[Localisation.THETA], 0.02) + self.__gauss(self.States[:,Localisation.THETA] - (self.measurement[Localisation.THETA] - numpy.pi), 0.02)
self.Weights *= self.__gauss(self.States[:,Localisation.THETADOT] - self.measurement[Localisation.THETADOT], 0.25)
# normalise the weights so that their sum is one
sum = numpy.sum(self.Weights)
if sum != 0:
self.Weights /= sum
def resample(self):
""" """
# An SIS filter resamples only when necessary
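        # Neff estimates the effective number of particles; when it falls below
        # 10% of the particle count, systematic (low-variance) resampling
        # re-draws particles in proportion to their weights.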
Neff = 1.0/numpy.sum(self.Weights**2)
Ns = Localisation.NUM_PARTICLES
if Neff < 0.1*Ns:
NsInv = 1.0/Ns
c = numpy.cumsum(self.Weights)
u = NsInv*numpy.arange(Ns) + numpy.random.uniform(0, NsInv)
i = 0
#print "Pre resample:"
#print self.States[:,0:3]
for j in range(Ns):
while u[j] > c[i]:
i = i + 1
self.States[j] = self.States[i]
self.PreviousStates[j] = self.PreviousStates[i]
self.accelerations[j] = self.accelerations[i]
self.accelendtimes[j] = self.accelendtimes[i]
#print "Post resample:"
#print self.States[:,0:3]
self.Weights = NsInv*numpy.ones(Ns)
def modelSystem(self):
""" Updates each particle based on the system model """
dt = self.time - self.previoustime
sdthetadot = 0.25
self.States[:,Localisation.THETADOT] = self.PreviousStates[:,Localisation.THETADOT] + numpy.random.normal(0, sdthetadot, size=self.PreviousStates.shape[0])
self.States[:,Localisation.THETA] = self.PreviousStates[:,Localisation.THETA] + self.States[:,Localisation.THETADOT]*dt
self.States[:,Localisation.THETA] = numpy.arctan2(numpy.sin(self.States[:,Localisation.THETA]), numpy.cos(self.States[:,Localisation.THETA]))
def modelControl(self):
""" Updates each particle based on the control model """
        # My model for control: a change in control affects the state by
        # introducing a constant acceleration over the next 1 second (2 steps)
deltacontrol = self.control - self.previouscontrol
sdtheta = 0.2 # noise on estimate of acceleration magnitude (in rad/s/s)
# put a bit of spin on the robot if the desired bearing changes
if abs(deltacontrol[1]) > 0:
self.accelerations[:,2] += (1/self.accelerationduration[2])*deltacontrol[1] + numpy.random.normal(0, sdtheta, size=self.PreviousStates.shape[0])
self.accelendtimes[:,2] = self.time + self.accelerationduration[2]
# put a bit of spin on the robot if the final orientation changes
if self.control[2] < 1000 and abs(self.control[0]) < 10 and abs(deltacontrol[2]) > 0:
if self.previouscontrol[2] > 1000:
self.accelerations[:,2] += (1/self.accelerationduration[2])*self.control[2] + numpy.random.normal(0, sdtheta, size=self.PreviousStates.shape[0])
else:
self.accelerations[:,2] += (1/self.accelerationduration[2])*deltacontrol[2] + numpy.random.normal(0, sdtheta, size=self.PreviousStates.shape[0])
self.accelendtimes[:,2] = self.time + self.accelerationduration[2]
self.accelerations = numpy.where(self.accelendtimes > self.time, self.accelerations, 0)
# calculate the controls contribution to the state velocity
self.States[:,Localisation.THETADOT] += self.accelerations[:,2]*(self.time - self.previoustime)
self.previouscontrol = self.control
def estimateState(self):
""" Updates the estimate of the state """
best = numpy.argmin(self.Weights)
beststate = self.States[best,:]
#print "Best State:", beststate
cond = (numpy.sum(numpy.fabs(self.States - beststate), axis=1) < 10)
beststates = numpy.compress(cond, self.States, axis=0)
bestweights = numpy.compress(cond, self.Weights)
#print "States", self.States
#print "States within window:", cond
#print "States close to best", beststates
#print "Weights close to best", bestweights
#print "Product:", (bestweights*beststates.T).T
self.State = numpy.sum((bestweights*beststates.T).T, axis=0)
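        # Note: the windowed estimate computed above is immediately replaced by
        # the plain weighted mean over all particles on the next line.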
self.State = numpy.sum((self.Weights*self.States.T).T, axis=0)
print "Estimate:", self.State
if numpy.isnan(self.State[0]):
print "FAIL"
self.__updateAttributesFromState()
def __initParticles(self):
""" Initialises self.Particles to contain Localisation.NUM_PARTICLES particles around the current measurement """
#print "Initialising Particles around", self.measurement
self.States += self.measurement
        # I know for certain that at the beginning the robot is not moving, so all of the velocities should be zero. The position, however, should get some noise
self.States[:,Localisation.THETA] += numpy.random.normal(0, 0.02, size=self.States.shape[0])
# now swap half of the orientations
self.States[:,Localisation.THETA] = numpy.where(numpy.random.uniform(0,1, size=self.States.shape[0]) < 0.5, self.States[:, Localisation.THETA], self.States[:, Localisation.THETA] - numpy.pi)
#print self.States
def __naoToState(self, nao):
state = numpy.zeros(Localisation.STATE_LENGTH)
if nao != None:
#state[Localisation.X] = nao.X
#state[Localisation.Y] = nao.Y
state[Localisation.THETA] = nao.Orientation
#state[Localisation.XDOT] = nao.VX
#state[Localisation.YDOT] = nao.VY
state[Localisation.THETADOT] = nao.VOrientation
#print nao.X
#self.AllX.append(nao.X)
#self.AllY.append(nao.Y)
#self.AllTheta.append(nao.Orientation)
#self.AllXdot.append(nao.VX)
#self.AllYdot.append(nao.VY)
#self.AllThetadot.append(nao.VOrientation)
#print "SDs: X", numpy.std(self.AllX), "Y", numpy.std(self.AllY), "Theta", numpy.std(self.AllTheta), "Xdot", numpy.std(self.AllXdot), "Ydot", numpy.std(self.AllYdot), "Thetadot", numpy.std(self.AllThetadot)
return state
def __updateAttributesFromState(self):
""" I have a bunch of convienent attributes for accessing the state. I need to keep them for backward compatiblity purposes. """
self.X = self.measurement[0]#self.State[Localisation.X]
self.Y = self.measurement[1]#self.State[Localisation.Y]
self.Theta = self.State[Localisation.THETA]
self.VX = 0#self.State[Localisation.XDOT]
self.VY = 0#self.State[Localisation.YDOT]
self.VTheta = self.State[Localisation.THETADOT]
self.V = 0#numpy.sqrt(self.VX**2 + self.VY**2)
def __gauss(self, x, sigma):
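        # Gaussian kernel used for particle weighting; the leading constant is
        # not the textbook 1/(sigma*sqrt(2*pi)), but any constant factor cancels
        # when the weights are normalised in updateWeights().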
return (1.0/numpy.sqrt(2*numpy.pi*sigma))*numpy.exp(-(x**2)/(2*sigma**2))
if __name__ == '__main__':
import matplotlib
matplotlib.use('WXAgg')
matplotlib.rcParams['toolbar'] = 'None'
import pylab, psyco, wx
psyco.full()
x = list()
y = list()
o = list()
localisation = Localisation()
loopcount = 0
control = numpy.zeros(3)
ax = pylab.subplot(111)
canvas = ax.figure.canvas
particleplot, = pylab.plot([0,0],[0,0], marker='o', color='k', linewidth=0, markersize=2, animated=True)
estimateplot, = pylab.plot([0,0],[0,0], marker='o', animated=True)
ax.set_xlim(-200, 200)
ax.set_ylim(-200, 200)
canvas.draw()
canvas.gui_repaint()
def update_plot(*args):
""" hmm """
global control, loopcount, localisation
if update_plot.background is None:
update_plot.background = canvas.copy_from_bbox(ax.bbox)
starttime = time.time()
localisation.update(control, None)
#x.append(localisation.State[0])
#y.append(localisation.State[1])
#o.append(localisation.State[2])
loopcount += 1
if loopcount == 2:
print "Starting"
control = numpy.array([200,-0.5,0])
canvas.restore_region(update_plot.background)
estimateplot.set_data(x,y)
particleplot.set_data(100*numpy.cos(localisation.States[:,Localisation.THETA]), 100*numpy.sin(localisation.States[:,Localisation.THETA]))
ax.draw_artist(particleplot)
#ax.draw_artist(estimateplot)
canvas.blit(ax.bbox)
time.sleep(max(0,0.1 - (time.time() - starttime)))
wx.WakeUpIdle()
update_plot.background = None
wx.EVT_IDLE(wx.GetApp(), update_plot)
pylab.show()
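# --- Editor's illustrative sketch (not part of the original file) ---
# Localisation.updateWeights() scores every particle against the measured
# bearing *and* the measured bearing minus pi, because the tracked orientation
# is ambiguous by 180 degrees. The same weighting as a standalone helper:
def _ambiguous_bearing_weights(particle_thetas, theta_meas, sd=0.02):
    gauss = lambda x: numpy.exp(-(x ** 2) / (2 * sd ** 2))
    return gauss(particle_thetas - theta_meas) + \
        gauss(particle_thetas - (theta_meas - numpy.pi))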
|
gpl-3.0
|
aselims/androguard
|
elsim/elsim/elsim.py
|
37
|
16175
|
# This file is part of Elsim
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Elsim is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Elsim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Elsim. If not, see <http://www.gnu.org/licenses/>.
import logging
ELSIM_VERSION = 0.2
log_elsim = logging.getLogger("elsim")
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
log_elsim.addHandler(console_handler)
log_runtime = logging.getLogger("elsim.runtime") # logs at runtime
log_interactive = logging.getLogger("elsim.interactive") # logs in interactive functions
log_loading = logging.getLogger("elsim.loading") # logs when loading
def set_debug():
log_elsim.setLevel( logging.DEBUG )
def get_debug():
return log_elsim.getEffectiveLevel() == logging.DEBUG
def warning(x):
log_runtime.warning(x)
def error(x):
    log_runtime.error(x)
    raise Exception(x)
def debug(x):
log_runtime.debug(x)
from similarity.similarity import *
FILTER_ELEMENT_METH = "FILTER_ELEMENT_METH"
FILTER_CHECKSUM_METH = "FILTER_CHECKSUM_METH" # function to checksum an element
FILTER_SIM_METH = "FILTER_SIM_METH" # function to calculate the similarity between two elements
FILTER_SORT_METH = "FILTER_SORT_METH" # function to sort all similar elements
FILTER_SORT_VALUE = "FILTER_SORT_VALUE" # value which used in the sort method to eliminate not interesting comparisons
FILTER_SKIPPED_METH = "FILTER_SKIPPED_METH" # object to skip elements
FILTER_SIM_VALUE_METH = "FILTER_SIM_VALUE_METH" # function to modify values of the similarity
BASE = "base"
ELEMENTS = "elements"
HASHSUM = "hashsum"
SIMILAR_ELEMENTS = "similar_elements"
HASHSUM_SIMILAR_ELEMENTS = "hash_similar_elements"
NEW_ELEMENTS = "newelements"
HASHSUM_NEW_ELEMENTS = "hash_new_elements"
DELETED_ELEMENTS = "deletedelements"
IDENTICAL_ELEMENTS = "identicalelements"
INTERNAL_IDENTICAL_ELEMENTS = "internal identical elements"
SKIPPED_ELEMENTS = "skippedelements"
SIMILARITY_ELEMENTS = "similarity_elements"
SIMILARITY_SORT_ELEMENTS = "similarity_sort_elements"
class ElsimNeighbors(object):
def __init__(self, x, ys):
import numpy as np
from sklearn.neighbors import NearestNeighbors
#print x, ys
CI = np.array( [x.checksum.get_signature_entropy(), x.checksum.get_entropy()] )
#print CI, x.get_info()
#print
for i in ys:
CI = np.vstack( (CI, [i.checksum.get_signature_entropy(), i.checksum.get_entropy()]) )
#idx = 0
#for i in np.array(CI)[1:]:
# print idx+1, i, ys[idx].get_info()
# idx += 1
self.neigh = NearestNeighbors(2, 0.4)
self.neigh.fit(np.array(CI))
#print self.neigh.kneighbors( CI[0], len(CI) )
self.CI = CI
self.ys = ys
def cmp_elements(self):
z = self.neigh.kneighbors( self.CI[0], 5 )
l = []
cmp_values = z[0][0]
cmp_elements = z[1][0]
idx = 1
for i in cmp_elements[1:]:
#if cmp_values[idx] > 1.0:
# break
#print i, cmp_values[idx], self.ys[ i - 1 ].get_info()
l.append( self.ys[ i - 1 ] )
idx += 1
return l
def split_elements(el, els):
e1 = {}
for i in els:
e1[ i ] = el.get_associated_element( i )
return e1
####
# elements : entropy raw, hash, signature
#
# set elements : hash
# hash table elements : hash --> element
class Elsim(object):
def __init__(self, e1, e2, F, T=None, C=None, libnative=True, libpath="elsim/elsim/similarity/libsimilarity/libsimilarity.so"):
self.e1 = e1
self.e2 = e2
self.F = F
self.compressor = SNAPPY_COMPRESS
set_debug()
if T != None:
self.F[ FILTER_SORT_VALUE ] = T
if isinstance(libnative, str):
libpath = libnative
libnative = True
self.sim = SIMILARITY( libpath, libnative )
if C != None:
if C in H_COMPRESSOR:
self.compressor = H_COMPRESSOR[ C ]
self.sim.set_compress_type( self.compressor )
else:
self.sim.set_compress_type( self.compressor )
self.filters = {}
self._init_filters()
self._init_index_elements()
self._init_similarity()
self._init_sort_elements()
self._init_new_elements()
def _init_filters(self):
self.filters = {}
self.filters[ BASE ] = {}
self.filters[ BASE ].update( self.F )
self.filters[ ELEMENTS ] = {}
self.filters[ HASHSUM ] = {}
self.filters[ IDENTICAL_ELEMENTS ] = set()
self.filters[ SIMILAR_ELEMENTS ] = []
self.filters[ HASHSUM_SIMILAR_ELEMENTS ] = []
self.filters[ NEW_ELEMENTS ] = set()
self.filters[ HASHSUM_NEW_ELEMENTS ] = []
self.filters[ DELETED_ELEMENTS ] = []
self.filters[ SKIPPED_ELEMENTS ] = []
self.filters[ ELEMENTS ][ self.e1 ] = []
self.filters[ HASHSUM ][ self.e1 ] = []
self.filters[ ELEMENTS ][ self.e2 ] = []
self.filters[ HASHSUM ][ self.e2 ] = []
self.filters[ SIMILARITY_ELEMENTS ] = {}
self.filters[ SIMILARITY_SORT_ELEMENTS ] = {}
self.set_els = {}
self.ref_set_els = {}
self.ref_set_ident = {}
def _init_index_elements(self):
self.__init_index_elements( self.e1, 1 )
self.__init_index_elements( self.e2 )
def __init_index_elements(self, ce, init=0):
self.set_els[ ce ] = set()
self.ref_set_els[ ce ] = {}
self.ref_set_ident[ce] = {}
for ae in ce.get_elements():
e = self.filters[BASE][FILTER_ELEMENT_METH]( ae, ce )
if self.filters[BASE][FILTER_SKIPPED_METH].skip( e ):
self.filters[ SKIPPED_ELEMENTS ].append( e )
continue
self.filters[ ELEMENTS ][ ce ].append( e )
fm = self.filters[ BASE ][ FILTER_CHECKSUM_METH ]( e, self.sim )
e.set_checksum( fm )
sha256 = e.getsha256()
self.filters[ HASHSUM ][ ce ].append( sha256 )
if sha256 not in self.set_els[ ce ]:
self.set_els[ ce ].add( sha256 )
self.ref_set_els[ ce ][ sha256 ] = e
self.ref_set_ident[ce][sha256] = []
self.ref_set_ident[ce][sha256].append(e)
def _init_similarity(self):
intersection_elements = self.set_els[ self.e2 ].intersection( self.set_els[ self.e1 ] )
difference_elements = self.set_els[ self.e2 ].difference( intersection_elements )
self.filters[IDENTICAL_ELEMENTS].update([ self.ref_set_els[ self.e1 ][ i ] for i in intersection_elements ])
available_e2_elements = [ self.ref_set_els[ self.e2 ][ i ] for i in difference_elements ]
# Check if some elements in the first file has been modified
for j in self.filters[ELEMENTS][self.e1]:
self.filters[ SIMILARITY_ELEMENTS ][ j ] = {}
#debug("SIM FOR %s" % (j.get_info()))
if j.getsha256() not in self.filters[HASHSUM][self.e2]:
#eln = ElsimNeighbors( j, available_e2_elements )
#for k in eln.cmp_elements():
for k in available_e2_elements:
#debug("%s" % k.get_info())
self.filters[SIMILARITY_ELEMENTS][ j ][ k ] = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )
if j.getsha256() not in self.filters[HASHSUM_SIMILAR_ELEMENTS]:
self.filters[SIMILAR_ELEMENTS].append(j)
self.filters[HASHSUM_SIMILAR_ELEMENTS].append( j.getsha256() )
def _init_sort_elements(self):
deleted_elements = []
for j in self.filters[SIMILAR_ELEMENTS]:
#debug("SORT FOR %s" % (j.get_info()))
sort_h = self.filters[BASE][FILTER_SORT_METH]( j, self.filters[SIMILARITY_ELEMENTS][ j ], self.filters[BASE][FILTER_SORT_VALUE] )
self.filters[SIMILARITY_SORT_ELEMENTS][ j ] = set( i[0] for i in sort_h )
ret = True
if sort_h == []:
ret = False
if ret == False:
deleted_elements.append( j )
for j in deleted_elements:
self.filters[ DELETED_ELEMENTS ].append( j )
self.filters[ SIMILAR_ELEMENTS ].remove( j )
def __checksort(self, x, y):
return y in self.filters[SIMILARITY_SORT_ELEMENTS][ x ]
def _init_new_elements(self):
# Check if some elements in the second file are totally new !
for j in self.filters[ELEMENTS][self.e2]:
# new elements can't be in similar elements
if j not in self.filters[SIMILAR_ELEMENTS]:
# new elements hashes can't be in first file
if j.getsha256() not in self.filters[HASHSUM][self.e1]:
ok = True
# new elements can't be compared to another one
for diff_element in self.filters[SIMILAR_ELEMENTS]:
if self.__checksort( diff_element, j ):
ok = False
break
if ok:
if j.getsha256() not in self.filters[HASHSUM_NEW_ELEMENTS]:
self.filters[NEW_ELEMENTS].add( j )
self.filters[HASHSUM_NEW_ELEMENTS].append( j.getsha256() )
def get_similar_elements(self):
""" Return the similar elements
@rtype : a list of elements
"""
return self.get_elem( SIMILAR_ELEMENTS )
def get_new_elements(self):
""" Return the new elements
@rtype : a list of elements
"""
return self.get_elem( NEW_ELEMENTS )
def get_deleted_elements(self):
""" Return the deleted elements
@rtype : a list of elements
"""
return self.get_elem( DELETED_ELEMENTS )
def get_internal_identical_elements(self, ce):
""" Return the internal identical elements
@rtype : a list of elements
"""
return self.get_elem( INTERNAL_IDENTICAL_ELEMENTS )
def get_identical_elements(self):
""" Return the identical elements
@rtype : a list of elements
"""
return self.get_elem( IDENTICAL_ELEMENTS )
def get_skipped_elements(self):
return self.get_elem( SKIPPED_ELEMENTS )
def get_elem(self, attr):
return [ x for x in self.filters[attr] ]
def show_element(self, i, details=True):
print "\t", i.get_info()
if details:
if i.getsha256() == None:
pass
elif i.getsha256() in self.ref_set_els[self.e2]:
if len(self.ref_set_ident[self.e2][i.getsha256()]) > 1:
for ident in self.ref_set_ident[self.e2][i.getsha256()]:
print "\t\t-->", ident.get_info()
else:
print "\t\t-->", self.ref_set_els[self.e2][ i.getsha256() ].get_info()
else:
for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ]:
print "\t\t-->", j.get_info(), self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ]
def get_element_info(self, i):
l = []
if i.getsha256() == None:
pass
elif i.getsha256() in self.ref_set_els[self.e2]:
l.append( [ i, self.ref_set_els[self.e2][ i.getsha256() ] ] )
else:
for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ]:
l.append( [i, j, self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ] ] )
return l
def get_associated_element(self, i):
return list(self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ])[0]
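    # Note (editor): get_similarity_value() averages (1 - distance) over the
    # similar elements plus the identical ones (distance 0.0) and, depending on
    # `new`, the new or deleted ones (distance 1.0), then scales to a percentage.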
def get_similarity_value(self, new=True):
values = []
self.sim.set_compress_type( BZ2_COMPRESS )
for j in self.filters[SIMILAR_ELEMENTS]:
k = self.get_associated_element( j )
value = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )
# filter value
value = self.filters[BASE][FILTER_SIM_VALUE_METH]( value )
values.append( value )
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 0.0 ) for i in self.filters[IDENTICAL_ELEMENTS] ] )
if new == True:
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[NEW_ELEMENTS] ] )
else:
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[DELETED_ELEMENTS] ] )
self.sim.set_compress_type( self.compressor )
similarity_value = 0.0
for i in values:
similarity_value += (1.0 - i)
if len(values) == 0:
return 0.0
return (similarity_value/len(values)) * 100
def show(self):
print "Elements:"
print "\t IDENTICAL:\t", len(self.get_identical_elements())
print "\t SIMILAR: \t", len(self.get_similar_elements())
print "\t NEW:\t\t", len(self.get_new_elements())
print "\t DELETED:\t", len(self.get_deleted_elements())
print "\t SKIPPED:\t", len(self.get_skipped_elements())
#self.sim.show()
ADDED_ELEMENTS = "added elements"
DELETED_ELEMENTS = "deleted elements"
LINK_ELEMENTS = "link elements"
DIFF = "diff"
class Eldiff(object):
def __init__(self, elsim, F):
self.elsim = elsim
self.F = F
self._init_filters()
self._init_diff()
def _init_filters(self):
self.filters = {}
self.filters[ BASE ] = {}
self.filters[ BASE ].update( self.F )
self.filters[ ELEMENTS ] = {}
self.filters[ ADDED_ELEMENTS ] = {}
self.filters[ DELETED_ELEMENTS ] = {}
self.filters[ LINK_ELEMENTS ] = {}
def _init_diff(self):
for i, j in self.elsim.get_elements():
self.filters[ ADDED_ELEMENTS ][ j ] = []
self.filters[ DELETED_ELEMENTS ][ i ] = []
x = self.filters[ BASE ][ DIFF ]( i, j )
self.filters[ ADDED_ELEMENTS ][ j ].extend( x.get_added_elements() )
self.filters[ DELETED_ELEMENTS ][ i ].extend( x.get_deleted_elements() )
self.filters[ LINK_ELEMENTS ][ j ] = i
#self.filters[ LINK_ELEMENTS ][ i ] = j
def show(self):
for bb in self.filters[ LINK_ELEMENTS ] : #print "la"
print bb.get_info(), self.filters[ LINK_ELEMENTS ][ bb ].get_info()
print "Added Elements(%d)" % (len(self.filters[ ADDED_ELEMENTS ][ bb ]))
for i in self.filters[ ADDED_ELEMENTS ][ bb ]:
print "\t",
i.show()
print "Deleted Elements(%d)" % (len(self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ]))
for i in self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ]:
print "\t",
i.show()
print
def get_added_elements(self):
return self.filters[ ADDED_ELEMENTS ]
def get_deleted_elements(self):
return self.filters[ DELETED_ELEMENTS ]
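# --- Editor's illustrative sketch (not part of the original module) ---
# Elsim/Eldiff are driven by a filter dictionary F keyed by the FILTER_*
# constants defined near the top of this file. The callables below are
# hypothetical placeholders that only document the expected signatures; real
# filters are supplied by the caller (e.g. the dalvik/text front-ends).
def _example_filter_skeleton():
    class _NeverSkip(object):
        def skip(self, element):
            return False
    return {
        FILTER_ELEMENT_METH: lambda raw_element, container: raw_element,  # wrap a raw element
        FILTER_CHECKSUM_METH: lambda element, sim: None,                  # build a checksum object for element.set_checksum()
        FILTER_SIM_METH: lambda sim, e1, e2: 0.0,                         # distance between two elements, 0.0 == identical
        FILTER_SORT_METH: lambda element, sims, threshold: sorted(
            [kv for kv in sims.items() if kv[1] <= threshold], key=lambda kv: kv[1]),
        FILTER_SORT_VALUE: 0.4,
        FILTER_SKIPPED_METH: _NeverSkip(),                                # object deciding which elements to ignore
        FILTER_SIM_VALUE_METH: lambda value: value,                       # post-process a distance value
    }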
|
apache-2.0
|
apapadopoulos/MultiCoreMigrationSimulator
|
mcms.py
|
1
|
9483
|
#!/usr/bin/python
from __future__ import division
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import argparse
import os
import sys
import libs.Process as proc
import libs.Controller as ctrl
import libs.Scheduler as sched
import libs.Migration as mig
import libs.Utils as ut
import libs.Tests as tst
def main():
## TODOs:
# - add argv with numCores, numThreads, tFin from commandLine
# - add print on a file results
# - re-write for loops in a more efficient way
## Manage command line inputs
# Defining command line options to find out the algorithm
parser = argparse.ArgumentParser( \
description='Run multicore migration simulator.', \
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
migrAlgos = ("simple load_aware load_normalized turn_over").split()
parser.add_argument('--migration',
help = 'Migration algorithm: ' + ' '.join(migrAlgos),
default = migrAlgos[0])
parser.add_argument('--outdir',
help = 'Destination folder for results and logs',
default = './results/')
parser.add_argument('--simTime',
type = int,
help = 'Simulation time.',
default = 1000)
parser.add_argument('--numThreads',
type = int,
help = 'Number of threads.',
default = 500)
parser.add_argument('--numCores',
type = int,
                        help = 'Number of cores.',
default = 8)
parser.add_argument('--utilizationSetPoint',
type = float,
help = 'Utilization setpoint.',
default = 0.5)
parser.add_argument('--relocationThreshold',
type = float,
                        help = 'Relocation threshold.',
default = 0.5)
parser.add_argument('--deltaSP',
type = int,
help = 'Set point update period (Valid only with load_normalized!).',
default = 10)
parser.add_argument('--deltaTO',
type = int,
help = 'Set point update period (Valid only with turn_over!).',
default = 50)
parser.add_argument('--minL',
type = float,
                        help = 'Minimum load (Valid only with turn_over!).',
default = 0.1)
parser.add_argument('--padding',
type = float,
help = 'Padding for the adaptation of the set point (Valid only with load_normalized!).',
default = 1.0)
parser.add_argument('--startupTime',
type = int,
help = 'Time needed for the system to startup.',
default = 0)
parser.add_argument('--plot',
type = int,
help = 'Option to show graphs or not.',
default = 0)
parser.add_argument('--save',
type = int,
help = 'Options to save data or not.',
default = 0)
parser.add_argument('--verb',
type = int,
help = 'Options to have a verbose execution.',
default = 0)
parser.add_argument('--scenario',
type = int,
help = 'Different initial conditions. 0: First core overloaded,\
                        1: Threads spread among the cores,\
                        2: Threads spread among the cores, but last empty.',
default = 0)
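    # Example invocation (illustrative only; values and paths are arbitrary):
    #   python mcms.py --migration load_normalized --numCores 8 --numThreads 500 \
    #       --simTime 1000 --deltaSP 10 --padding 1.0 --outdir ./results/ --save 1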
# Parsing the command line inputs
args = parser.parse_args()
# Migration algorithm
migration = args.migration
if migration not in migrAlgos:
print "Unsupported algorithm %s"%format(migration)
parser.print_help()
quit()
# Creating the directory where to store the results
ut.mkdir_p(args.outdir)
##############################
## The program starts here
##############################
numCores = args.numCores
numThreads = args.numThreads
tFin = args.simTime
## Creating numThreads threads
Threads = []
alphas = []
for i in xrange(0,numThreads):
alpha = 0.5*numCores/numThreads
alphas.append(alpha)
ut.addProcess(Threads,ident=i, alpha=alpha,stdDev=0.001)
# Threads[i].viewProcess()
alphas = np.array(alphas)
## Creating one scheduler for each core
Schedulers = []
tauro = np.zeros(numCores)
for i in xrange(0,numCores):
Schedulers.append(sched.IplusPI(ident=i, Kiin=0.25, Kpout=2.0, Kiout=0.25))
tauro[i] = 1
## Creating a migration manager
# Migration data
utilizationSetPoint = args.utilizationSetPoint * np.ones(numCores) # utilization set point for each core
relocationThresholds = args.relocationThreshold * np.ones(numCores) # Thresholds
DeltaSP = args.deltaSP
DeltaTO = args.deltaTO
minL = args.minL
mm = mig.MigrationManager(numCores, relocationThresholds, minLoad=minL, padding=args.padding, verb=False)
placement_matrix = np.zeros((numThreads, numCores)); # how the threads are partitioned among the different cores
if args.scenario == 0:
## The threads start all on the first core
placement_matrix[:,0] = 1;
elif args.scenario == 1:
## The threads are equally spread among the cores
threadPerCore = np.ceil((1.*numThreads)/numCores)
for cc in xrange(0,numCores-1):
placement_matrix[cc*threadPerCore:(cc+1)*threadPerCore,cc] = 1
else:
placement_matrix[(numCores-1)*threadPerCore:,numCores-1] = 1
elif args.scenario == 2:
## The threads are equally spread among the cores. The last core is empty.
threadPerCore = np.ceil((1.*numThreads)/numCores)
for cc in xrange(0,numCores-1):
placement_matrix[cc*threadPerCore:(cc+1)*threadPerCore,cc] = 1
else:
placement_matrix[(numCores-2)*threadPerCore:,numCores-2] = 1
else:
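        # No-op: any unrecognised scenario leaves placement_matrix all zeros
        # (no threads assigned to any core).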
placement_matrix
vkk = np.zeros((tFin,1))
vSP = np.zeros((tFin,numCores))
vUn = np.zeros((tFin,numCores))
vU = np.zeros((tFin,numCores))
vmig= np.zeros((tFin,1))
vOI = np.zeros((tFin,numCores))
## Starting the simulation
print '[%s] started with:\n\tnumCores=%d,\n\tnumThreads=%d,\n\tpadding=%f,\n\trelocationThreshold=%f,\n\ttFin=%d...'%\
(args.migration,\
args.numCores,\
args.numThreads,\
args.padding,\
args.relocationThreshold,\
args.simTime)
for kk in xrange(1,tFin+1):
if args.verb:
ut.progress(kk,tFin, bar_length=20)
vkk[kk-1,:] = kk
for cc in xrange(0,numCores):
# Extracting the subset of tasks to be scheduled
subset_idx = np.nonzero(placement_matrix[:,cc])[0]
subset_Threads = [Threads[i] for i in subset_idx]
# Schedule the subset of tasks
taur, taut, tauto = Schedulers[cc].schedule(subset_Threads,tauro[cc])
#Schedulers[cc].viewUtilization()
vU[kk-1,cc] = Schedulers[cc].getUtilization()
vUn[kk-1,cc] = Schedulers[cc].getNominalUtilization()
vOI[kk-1,:] = mm.getOverloadIndex()
if kk > args.startupTime:
# Apply migration algorithm
if migration=='simple':
placement_matrix = mm.migration_simple(Schedulers, placement_matrix,utilizationSetPoint)
elif migration=='load_aware':
placement_matrix = mm.migration_load_aware(Schedulers, placement_matrix,utilizationSetPoint,alphas)
elif migration=='load_normalized':
# If DeltaSP is elapsed, update the utilization set point
if np.mod(kk,DeltaSP)==0:
utilizationSetPoint = mm.normalize_load(Schedulers)
else:
mm.average_load(Schedulers,method=1)
placement_matrix = mm.migration_load_aware(Schedulers, placement_matrix,utilizationSetPoint,alphas)
elif migration=='turn_over':
# If DeltaSP is elapsed, update the utilization set point
if np.mod(kk,DeltaTO)==0:
utilizationSetPoint = mm.turn_over_load(Schedulers,minLoad=minL)
#else:
# mm.average_load(Schedulers,method=1)
placement_matrix = mm.migration_load_aware(Schedulers, placement_matrix,utilizationSetPoint,alphas)
# Saving the utilization setpoint
vSP[kk-1,:] = utilizationSetPoint
vmig[kk-1,:] = mm.getTotalMigrations()
if args.verb:
print '\nSimulation finished!\n'
mm.viewTotalMigrations()
print '[%s] finished sim with:\n\tnumCores=%d,\n\tnumThreads=%d,\n\tpadding=%f,\n\trelocationThreshold=%f,\n\ttFin=%d...'%\
(args.migration,\
args.numCores,\
args.numThreads,\
args.padding,\
args.relocationThreshold,\
args.simTime)
mm.viewTotalMigrations()
if args.plot:
plt.figure(1)
plt.plot(xrange(0,tFin),vU)
plt.legend(['Core'+str(i) for i in xrange(0,numCores)])
plt.plot(xrange(0,tFin),vSP,'--')
plt.ylim((0,1.2))
plt.title('Actual utilization')
plt.xlabel('Scheduling rounds')
plt.ylabel('Utilization [Perc.]')
plt.figure(2)
plt.plot(xrange(0,tFin),vUn)
plt.plot(xrange(0,tFin),vSP,'--')
plt.ylim((0,1.2))
plt.title('Nominal utilization')
plt.xlabel('Scheduling rounds')
plt.ylabel('Utilization [Perc.]')
plt.figure(3)
plt.plot(xrange(0,tFin),vOI)
plt.plot(xrange(0,tFin),args.relocationThreshold*np.ones(tFin),'k--')
plt.ylim((0,args.relocationThreshold*1.5))
plt.title('Overload index')
plt.xlabel('Scheduling rounds')
plt.ylabel('Overload index')
plt.show()
if args.save:
# Saving results in the outdir directory
header = 'Round,'
for cc in xrange(0,numCores):
header += 'SetPointUtilization'+str(cc)+','
for cc in xrange(0,numCores):
header += 'NominalUtilizationCore'+str(cc)+','
for cc in xrange(0,numCores):
header += 'UtilizationCore'+str(cc)+','
for cc in xrange(0,numCores):
header += 'OverloadIndex'+str(cc)+','
header += 'TotalMigrations'
M = np.hstack((vkk,vSP,vUn,vU,vOI,vmig))
ut.save_results(args.outdir+'results_'\
+migration+'_'\
+'numCores'+str(numCores)+'_'\
+'numThreads'+str(numThreads)+'_'\
+'padding'+str(args.padding)+'_'\
+'relocationThreshold'+str(args.relocationThreshold)\
+'.csv', M, header=header)
def tests():
tFin = 500
numThreads=50
tst.testInnerLoop(tFin);
tst.testSchedulerAddRemoveThreads(tFin,numThreads)
tst.testSchedulerWithInternalDataPlot(tFin,numThreads)
tst.testSchedulerNoThreads(tFin)
if __name__ == "__main__":
sys.exit(main())
|
gpl-2.0
|
DigitalSlideArchive/HistomicsTK
|
histomicstk/annotations_and_masks/review_gallery.py
|
1
|
15303
|
import io
import os
import tempfile
try:
import matplotlib.pyplot as plt
except ImportError:
pass
import numpy as np
import pyvips
from PIL import Image
from imageio import imwrite
from pandas import DataFrame
from histomicstk.annotations_and_masks.annotation_and_mask_utils import \
get_scale_factor_and_appendStr, get_image_from_htk_response
from histomicstk.annotations_and_masks.annotations_to_masks_handler import \
_visualize_annotations_on_rgb
from histomicstk.annotations_and_masks.annotations_to_object_mask_handler \
import get_all_rois_from_slide_v2
from histomicstk.workflows.workflow_runner import Workflow_runner, \
Slide_iterator
from histomicstk.annotations_and_masks.masks_to_annotations_handler import \
get_annotation_documents_from_contours
# %============================================================================
# CONSTANTS
# source: https://libvips.github.io/libvips/API/current/Examples.md.html
# source 2: https://libvips.github.io/libvips/API/current/Examples.md.html
# source 3: https://github.com/libvips/pyvips/issues/109
# source 4: https://github.com/libvips/libvips/issues/1254
# map np dtypes to vips
DTYPE_TO_FORMAT = {
'uint8': 'uchar',
'int8': 'char',
'uint16': 'ushort',
'int16': 'short',
'uint32': 'uint',
'int32': 'int',
'float32': 'float',
'float64': 'double',
'complex64': 'complex',
'complex128': 'dpcomplex',
}
# map vips formats to np dtypes
FORMAT_TO_DTYPE = {
'uchar': np.uint8,
'char': np.int8,
'ushort': np.uint16,
'short': np.int16,
'uint': np.uint32,
'int': np.int32,
'float': np.float32,
'double': np.float64,
'complex': np.complex64,
'dpcomplex': np.complex128,
}
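# --- Illustrative sketch (not part of the original module) -------------------
# The two maps above are typically used to round-trip between numpy arrays and
# pyvips images, following the libvips examples linked above. The helper names
# below are hypothetical additions for illustration only.
def _numpy_to_vips_sketch(np_array):
    """Wrap an HxWxC, C-contiguous numpy array as a pyvips image."""
    height, width, bands = np_array.shape
    flat = np_array.reshape(width * height * bands)
    return pyvips.Image.new_from_memory(
        flat.data, width, height, bands, DTYPE_TO_FORMAT[str(np_array.dtype)])
def _vips_to_numpy_sketch(vips_image):
    """Read a pyvips image back into an HxWxC numpy array."""
    return np.ndarray(
        buffer=vips_image.write_to_memory(),
        dtype=FORMAT_TO_DTYPE[vips_image.format],
        shape=[vips_image.height, vips_image.width, vips_image.bands])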
# %============================================================================
def get_all_rois_from_folder_v2(
gc, folderid, get_all_rois_kwargs, monitor=''):
"""Get all rois in a girder folder using get_all_rois_from_slide_v2().
Parameters
----------
gc : girder_client.Girder_Client
authenticated girder client
folderid : str
girder id of folder
get_all_rois_kwargs : dict
kwargs to pass to get_all_rois_from_slide_v2()
monitor : str
monitor prefix
Returns
-------
None
"""
def _get_all_rois(slide_id, monitorPrefix, **kwargs):
sld = gc.get('/item/%s' % slide_id)
if "." not in sld['name']:
sld['name'] += "."
sldname = sld['name'][:sld['name'].find('.')].replace('/', '_#_')
return get_all_rois_from_slide_v2(
slide_id=slide_id, monitorprefix=monitorPrefix,
# encoding slide id makes things easier later
slide_name="%s_id-%s" % (sldname, slide_id),
**kwargs)
# update with params
get_all_rois_kwargs['gc'] = gc
# pull annotations for each slide in folder
workflow_runner = Workflow_runner(
slide_iterator=Slide_iterator(
gc, source_folder_id=folderid,
keep_slides=None,
),
workflow=_get_all_rois,
workflow_kwargs=get_all_rois_kwargs,
monitorPrefix=monitor
)
workflow_runner.run()
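# Hypothetical usage sketch (not part of the original module). The API URL,
# API key and folder id below are placeholders, and the contents of
# get_all_rois_kwargs depend on get_all_rois_from_slide_v2():
#
#   import girder_client
#   gc = girder_client.GirderClient(apiUrl='http://example.com:8080/api/v1')
#   gc.authenticate(apiKey='<API KEY>')
#   get_all_rois_from_folder_v2(
#       gc=gc, folderid='<FOLDER ID>',
#       get_all_rois_kwargs={},  # fill in per get_all_rois_from_slide_v2()
#       monitor='test')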
# %============================================================================
def _get_visualization_zoomout(
gc, slide_id, bounds, MPP, MAG, zoomout=4):
"""Get a zoomed out visualization of ROI RGB and annotation overlay.
Parameters
----------
gc : girder_client.Girder_Client
authenticated girder client
slide_id : str
girder ID of slide
bounds : dict
bounds of the region of interest. Must contain the keys
XMIN, XMAX, YMIN, YMAX
MPP : float
Microns per pixel.
MAG : float
Magnification. MPP overrides this.
zoomout : float
how much to zoom out
Returns
-------
np.array
        Zoomed out visualization. Output from _visualize_annotations_on_rgb().
"""
# get append string for server request
if MPP is not None:
getsf_kwargs = {
'MPP': MPP * (zoomout + 1),
'MAG': None,
}
elif MAG is not None:
getsf_kwargs = {
'MPP': None,
'MAG': MAG / (zoomout + 1),
}
else:
getsf_kwargs = {
'MPP': None,
'MAG': None,
}
sf, appendStr = get_scale_factor_and_appendStr(
gc=gc, slide_id=slide_id, **getsf_kwargs)
# now get low-magnification surrounding field
x_margin = (bounds['XMAX'] - bounds['XMIN']) * zoomout / 2
y_margin = (bounds['YMAX'] - bounds['YMIN']) * zoomout / 2
getStr = \
"/item/%s/tiles/region?left=%d&right=%d&top=%d&bottom=%d" \
% (slide_id,
max(0, bounds['XMIN'] - x_margin),
bounds['XMAX'] + x_margin,
max(0, bounds['YMIN'] - y_margin),
bounds['YMAX'] + y_margin)
getStr += appendStr
resp = gc.get(getStr, jsonResp=False)
rgb_zoomout = get_image_from_htk_response(resp)
# plot a bounding box at the ROI region
xmin = x_margin * sf
xmax = xmin + (bounds['XMAX'] - bounds['XMIN']) * sf
ymin = y_margin * sf
ymax = ymin + (bounds['YMAX'] - bounds['YMIN']) * sf
xmin, xmax, ymin, ymax = [str(int(j)) for j in (xmin, xmax, ymin, ymax)]
contours_list = [{
'color': 'rgb(255,255,0)',
'coords_x': ",".join([xmin, xmax, xmax, xmin, xmin]),
'coords_y': ",".join([ymin, ymin, ymax, ymax, ymin]),
}]
return _visualize_annotations_on_rgb(rgb_zoomout, contours_list)
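# Note (illustrative, not part of the original module): with, say, MPP=0.25
# and zoomout=4, the surrounding field above is fetched at 1.25 microns per
# pixel (5x coarser than the ROI itself), and the requested region extends the
# ROI by twice its width/height on each side (zoomout / 2 = 2).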
def _get_review_visualization(rgb, vis, vis_zoomout):
"""Get a visualization of rgb and annotations for rapid review.
Parameters
----------
rgb : np.array
mxnx3 rgb image
vis : np.array
visualization of rgb with overlayed annotations
    vis_zoomout : np.array
        same as vis, but at a lower magnification.
Returns
-------
np.array
visualization to be used for gallery
"""
wmax = max(vis.shape[1], vis_zoomout.shape[1])
hmax = max(vis.shape[0], vis_zoomout.shape[0])
fig, ax = plt.subplots(
1, 3, dpi=100,
figsize=(3 * wmax / 1000, hmax / 1000),
gridspec_kw={'wspace': 0.01, 'hspace': 0}
)
ax[0].imshow(vis)
ax[1].imshow(rgb)
ax[2].imshow(vis_zoomout)
for axis in ax:
axis.axis('off')
fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
buf = io.BytesIO()
plt.savefig(buf, format='png', pad_inches=0, dpi=1000)
buf.seek(0)
combined_vis = np.uint8(Image.open(buf))[..., :3]
plt.close()
return combined_vis
def _plot_rapid_review_vis(
roi_out, gc, slide_id, slide_name, MPP, MAG,
combinedvis_savepath, zoomout=4,
verbose=False, monitorprefix=''):
"""Plot a visualization for rapid review of ROI.
This is a callback to be called inside get_all_rois_from_slide_v2().
Parameters
----------
roi_out : dict
output from annotations_to_contours_no_mask()
gc : girder_client.Girder_Client
authenticated girder client
slide_id : str
girder slide id
slide_name : str
name of the slide
MPP : float
microns per pixel
MAG : float
        magnification. Superseded by MPP.
combinedvis_savepath : str
path to save the combined visualization
zoomout : float
how much to zoom out to get the gallery visualization
verbose : bool
print statements to screen
monitorprefix : str
        text to prepend to printed statements
Returns
-------
dict
        the roi_out input parameter, whether or not it was modified
"""
# get rgb and visualization (fetched mag + lower mag)
vis_zoomout = _get_visualization_zoomout(
gc=gc, slide_id=slide_id, bounds=roi_out['bounds'],
MPP=MPP, MAG=MAG, zoomout=zoomout)
    # combine everything into a neat visualization for rapid review
ROINAMESTR = "%s_left-%d_top-%d_bottom-%d_right-%d" % (
slide_name,
roi_out['bounds']['XMIN'], roi_out['bounds']['YMIN'],
roi_out['bounds']['YMAX'], roi_out['bounds']['XMAX'])
savename = os.path.join(combinedvis_savepath, ROINAMESTR + ".png")
rapid_review_vis = _get_review_visualization(
rgb=roi_out['rgb'], vis=roi_out['visualization'],
vis_zoomout=vis_zoomout)
# save visualization for later use
if verbose:
print("%s: Saving %s" % (monitorprefix, savename))
imwrite(im=rapid_review_vis, uri=savename)
return roi_out
# %============================================================================
def create_review_galleries(
tilepath_base, upload_results=True, gc=None,
gallery_savepath=None, gallery_folderid=None,
padding=25, tiles_per_row=2, tiles_per_column=5,
annprops=None, url=None, nameprefix=''):
"""Create and or post review galleries for rapid review.
Parameters
----------
tilepath_base : str
        directory where the combined visualizations (tiles) are saved.
upload_results : bool
upload results to DSA?
gc : girder_client.Girder_Client
        authenticated girder client. Only needed if upload_results is True.
    gallery_savepath : str
        directory to save galleries locally; defaults to a temporary
        directory if None.
    gallery_folderid : str
        girder ID of folder to post galleries to. Only needed if
        upload_results is True.
padding : int
padding in pixels between tiles in same gallery.
tiles_per_row : int
how many visualization tiles per row in gallery.
tiles_per_column : int
how many visualization tiles per column in gallery.
annprops : dict
properties of the annotations to be posted to DSA. Passed directly
as annprops to get_annotation_documents_from_contours()
url : str
url of the Digital Slide Archive Instance. For example:
http://candygram.neurology.emory.edu:8080/
nameprefix : str
prefix to prepend to gallery name
Returns
-------
list
each entry is a dict representing the response of the server
post request to upload the gallery to DSA.
"""
if upload_results:
for par in ('gc', 'gallery_folderid', 'url'):
if locals()[par] is None:
raise Exception(
"%s cannot be None if upload_results!" % par)
if gallery_savepath is None:
gallery_savepath = tempfile.mkdtemp(prefix='gallery-')
savepaths = []
resps = []
tile_paths = [
os.path.join(tilepath_base, j) for j in
os.listdir(tilepath_base) if j.endswith('.png')]
tile_paths.sort()
def _parse_tilepath(tpath):
basename = os.path.basename(tpath)
basename = basename[:basename.rfind('.')]
tileinfo = {'slide_name': basename.split('_')[0]}
for attrib in ['id', 'left', 'top', 'bottom', 'right']:
tileinfo[attrib] = basename.split(
attrib + '-')[1].split('_')[0]
# add URL in histomicsTK
tileinfo['URL'] = url + \
"histomicstk#?image=%s&bounds=%s%%2C%s%%2C%s%%2C%s%%2C0" % (
tileinfo['id'],
tileinfo['left'], tileinfo['top'],
tileinfo['right'], tileinfo['bottom'])
return tileinfo
n_tiles = len(tile_paths)
n_galleries = int(np.ceil(n_tiles / (tiles_per_row * tiles_per_column)))
tileidx = 0
for galno in range(n_galleries):
        # this makes an 8-bit image, initialized as a 1x1x3 matrix
im = pyvips.Image.black(1, 1, bands=3)
# this will store the roi contours
contours = []
for row in range(tiles_per_column):
rowpos = im.height + padding
# initialize "row" strip image
row_im = pyvips.Image.black(1, 1, bands=3)
for col in range(tiles_per_row):
if tileidx == n_tiles:
break
tilepath = tile_paths[tileidx]
print("Inserting tile %d of %d: %s" % (
tileidx, n_tiles, tilepath))
tileidx += 1
# # get tile from file
tile = pyvips.Image.new_from_file(
tilepath, access="sequential")
# insert tile into mosaic row
colpos = row_im.width + padding
row_im = row_im.insert(
tile[:3], colpos, 0, expand=True, background=255)
if upload_results:
tileinfo = _parse_tilepath(tilepath)
xmin = colpos
ymin = rowpos
xmax = xmin + tile.width
ymax = ymin + tile.height
xmin, xmax, ymin, ymax = [
str(j) for j in (xmin, xmax, ymin, ymax)]
contours.append({
'group': tileinfo['slide_name'],
'label': tileinfo['URL'],
'color': 'rgb(0,0,0)',
'coords_x': ",".join([xmin, xmax, xmax, xmin, xmin]),
'coords_y': ",".join([ymin, ymin, ymax, ymax, ymin]),
})
# Add a small contour so that when the pathologist
# changes the label to approve or disapprove of the
# FOV, the URL in THIS contour (a link to the original
# FOV) can be used. We place it in the top right corner.
boxsize = 25
xmin = str(int(xmax) - boxsize)
ymax = str(int(ymin) + boxsize)
contours.append({
'group': tileinfo['slide_name'],
'label': tileinfo['URL'],
'color': 'rgb(0,0,0)',
'coords_x': ",".join([xmin, xmax, xmax, xmin, xmin]),
'coords_y': ",".join([ymin, ymin, ymax, ymax, ymin]),
})
# insert row into main gallery
im = im.insert(row_im, 0, rowpos, expand=True, background=255)
filename = '%s_gallery-%d' % (nameprefix, galno + 1)
savepath = os.path.join(gallery_savepath, filename + '.tiff')
print("Saving gallery %d of %d to %s" % (
galno + 1, n_galleries, savepath))
# save temporarily to disk to be uploaded
im.tiffsave(
savepath, tile=True, tile_width=256, tile_height=256, pyramid=True)
if upload_results:
# upload the gallery to DSA
resps.append(gc.uploadFileToFolder(
folderId=gallery_folderid, filepath=savepath,
filename=filename))
os.remove(savepath)
# get and post FOV location annotations
annotation_docs = get_annotation_documents_from_contours(
DataFrame(contours), separate_docs_by_group=True,
annprops=annprops)
for doc in annotation_docs:
_ = gc.post(
"/annotation?itemId=" + resps[-1]['itemId'], json=doc)
else:
savepaths.append(savepath)
return resps if upload_results else savepaths
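# Hypothetical usage sketch (not part of the original module); gc, the folder
# id, the URL and annprops are placeholders whose exact contents depend on the
# DSA deployment and on get_annotation_documents_from_contours():
#
#   resps = create_review_galleries(
#       tilepath_base='/tmp/combinedvis/', upload_results=True, gc=gc,
#       gallery_folderid='<FOLDER ID>', padding=25,
#       tiles_per_row=2, tiles_per_column=5,
#       annprops={},  # fill in per get_annotation_documents_from_contours()
#       url='http://candygram.neurology.emory.edu:8080/',
#       nameprefix='test')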
# %============================================================================
|
apache-2.0
|
wchan/tensorflow
|
speech4/data/wsj.py
|
1
|
9979
|
#!/usr/bin/env python
################################################################################
# Copyright 2016 William Chan <[email protected]>.
################################################################################
import argparse
import google
import itertools
import kaldi_io
import matplotlib
matplotlib.use('Agg')
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import os
import re
import string
import sys
import tensorflow as tf
import tensorflow.core.framework.token_model_pb2 as token_model_pb2
def main():
parser = argparse.ArgumentParser(description='SPEECH3 (C) 2015 William Chan <[email protected]>')
parser.add_argument('--kaldi_scp', type=str)
parser.add_argument('--kaldi_txt', type=str)
parser.add_argument('--tf_records', type=str)
args = vars(parser.parse_args())
convert(args['kaldi_scp'], args['kaldi_txt'], args['tf_records'])
def token_model_add_token(token_model_proto, token_id, token_string):
token = token_model_proto.tokens.add()
token.token_id = int(token_id)
token.token_string = str(token_string)
def create_token_model():
token_model_proto = token_model_pb2.TokenModelProto()
token_model_proto.token_sos = 0
token_model_proto.token_string_sos = "<S>"
token_model_add_token(
token_model_proto, token_model_proto.token_sos,
token_model_proto.token_string_sos)
token_model_proto.token_eos = 1
token_model_proto.token_string_eos = "</S>"
token_model_add_token(
token_model_proto, token_model_proto.token_eos,
token_model_proto.token_string_eos)
token_model_proto.token_eow = 2
token_model_proto.token_string_eow = " "
token_model_add_token(
token_model_proto, token_model_proto.token_eow,
token_model_proto.token_string_eow)
token_model_proto.token_unk = 3
token_model_proto.token_string_unk = "<UNK>"
token_model_add_token(
token_model_proto, token_model_proto.token_unk,
token_model_proto.token_string_unk)
token_model_proto.token_blank = 4
token_model_proto.token_string_blank = "<BLANK>"
token_model_add_token(
token_model_proto, token_model_proto.token_blank,
token_model_proto.token_string_blank)
token_id = 5
for c in string.ascii_uppercase:
token_model_add_token(token_model_proto, token_id, c)
token_id = token_id + 1
for n in range(10):
token_model_add_token(token_model_proto, token_id, str(n))
token_id = token_id + 1
for c in "$&/-\'.!?,:":
token_model_add_token(token_model_proto, token_id, c)
token_id = token_id + 1
assert token_id == 51
with open("speech4/conf/wsj/token_model.pbtxt", "w") as proto_file:
proto_file.write(str(token_model_proto))
return token_model_proto
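# Illustrative sketch (hypothetical helper, not in the original file): the
# proto built above maps characters to integer ids, so converting an already
# normalized transcript to token ids is a dictionary lookup.
def _text_to_tokens_sketch(token_model_proto, text):
  char_to_id = {t.token_string: t.token_id for t in token_model_proto.tokens}
  return [char_to_id[c] for c in text]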
def normalize_text_wsj(line):
cols = line.split(' ', 1)
assert len(cols) >= 1 and len(cols) <= 2
uttid = cols[0]
if len(cols) == 1:
utt = ''
elif len(cols) == 2:
utt = cols[1]
assert '#' not in utt
# Normalize to uppercase.
utt = utt.upper()
utt = utt.replace('<NOISE>', '')
utt = utt.replace('&ERSAND', 'AMPERSAND')
utt = utt.replace(')CLOSE_PAREN', 'CLOSEPAREN')
utt = utt.replace(')CLOSE-PAREN', 'CLOSEPAREN')
utt = utt.replace('\"CLOSE-QUOTE', 'CLOSEQUOTE')
utt = utt.replace(':COLON', "COLON")
utt = utt.replace(',COMMA', 'COMMA')
utt = utt.replace('-DASH', 'DASH')
utt = utt.replace('"DOUBLE-QUOTE', 'DOUBLEQUOTE')
utt = utt.replace('"END-QUOTE', 'ENDQUOTE')
utt = utt.replace('"END-OF-QUOTE', 'ENDOFQUOTE')
utt = utt.replace(')END-OF-PAREN', 'ENDOFPAREN')
utt = utt.replace(')END-THE-PAREN', 'ENDTHEPAREN')
utt = utt.replace('!EXCLAMATION-POINT', 'EXCLAMATIONPOINT')
utt = utt.replace('-HYPHEN', 'HYPHEN')
utt = utt.replace('\"IN-QUOTES', 'INQUOTES')
utt = utt.replace('(IN-PARENTHESIS', 'INPARENTHESIS')
utt = utt.replace('{LEFT-BRACE', 'LEFTBRACE')
utt = utt.replace('(LEFT-PAREN', 'LEFTPAREN')
utt = utt.replace('(PAREN', 'PAREN')
utt = utt.replace(')PAREN', 'PAREN')
utt = utt.replace('?QUESTION-MARK', 'QUESTIONMARK')
utt = utt.replace('"QUOTE', 'QUOTE')
utt = utt.replace(')RIGHT-PAREN', 'RIGHTPAREN')
utt = utt.replace('}RIGHT-BRACE', 'RIGHTBRACE')
utt = utt.replace('\'SINGLE-QUOTE', 'SINGLEQUOTE')
utt = utt.replace('/SLASH', 'SLASH')
utt = utt.replace(';SEMI-COLON', "SEMICOLON")
utt = utt.replace(')UN-PARENTHESES', 'UNPARENTHESES')
utt = utt.replace('"UNQUOTE', 'UNQUOTE')
utt = utt.replace('.PERIOD', "PERIOD")
utt = re.sub(r'\([^)]*\)', '', utt)
utt = re.sub(r'<[^)]*>', '', utt)
utt = utt.replace('.', '')
utt = utt.replace('-', '')
utt = utt.replace('!', '')
utt = utt.replace(':', '')
utt = utt.replace(';', '')
utt = utt.replace('*', '')
utt = utt.replace('`', '\'')
utt = utt.replace('~', '')
assert '~' not in utt
assert '`' not in utt
assert '-' not in utt
assert '_' not in utt
assert '.' not in utt
assert ',' not in utt
assert ':' not in utt
assert ';' not in utt
assert '!' not in utt
assert '?' not in utt
assert '<' not in utt
assert '(' not in utt
assert ')' not in utt
assert '[' not in utt
# assert '\'' not in utt
assert '"' not in utt
assert '*' not in utt
# Remove double spaces.
utt = ' '.join(filter(bool, utt.split(' ')))
return [uttid, utt]
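# Illustrative example (hypothetical input line, not from the corpus):
#   normalize_text_wsj("utt01 the sale ,COMMA of the hotels .PERIOD")
#   -> ["utt01", "THE SALE COMMA OF THE HOTELS PERIOD"]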
def is_vowel(c):
vowels = ["A", "E", "I", "O", "U", "Y"]
return c in vowels
def count_words(s):
return s.count(" ")
def count_vowels(s):
vowels = 0
for c in s:
vowels += is_vowel(c)
return vowels
def convert(
kaldi_scp, kaldi_txt, tf_records):
create_token_model()
# Load the token model.
token_model_proto = token_model_pb2.TokenModelProto()
character_to_token_map = {}
token_model_pbtxt = "/data-local/wchan/speech4/speech4/conf/token_model_character_simple.pbtxt"
token_model_pbtxt = "speech4/conf/wsj/token_model.pbtxt"
with open(token_model_pbtxt, "r") as proto_file:
google.protobuf.text_format.Merge(proto_file.read(), token_model_proto)
for token in token_model_proto.tokens:
character_to_token_map[token.token_string] = token.token_id
text_map = {}
lines = [line.strip() for line in open(kaldi_txt, 'r')]
sorted_uttids = []
for line in lines:
[uttid, utt] = normalize_text_wsj(line)
text_map[uttid] = utt
tf_record_writer = tf.python_io.TFRecordWriter(tf_records)
kaldi_feat_reader = kaldi_io.SequentialBaseFloatMatrixReader(kaldi_scp)
time_factor = 2
utterance_count = 0
features_width = 0
features_len_total = 0
features_len_max = 0
tokens_len_total = 0
tokens_len_max = 0
feature_token_ratio_min = 10
vowel_count_total = 0
word_count_total = 0
pad_min4 = 1e8
pad_max4 = 0
pad_sum4 = 0
examples = []
for uttid, feats in kaldi_feat_reader:
text = text_map[uttid]
tokens = [character_to_token_map[c] for c in text]
features_len = feats.shape[0]
features_len_max = max(features_len_max, features_len)
features_len_total += features_len
features_width = feats.shape[1]
tokens_len = len(tokens)
tokens_len_max = max(tokens_len_max, tokens_len)
tokens_len_total += tokens_len
feats_max = feats.max(1)
s_min = 0
s_max = len(feats_max) - 1
for idx in range(len(feats_max)):
if feats_max[idx] > 1.0:
s_min = idx
break
for idx in range(len(feats_max) - 1, 0, -1):
if feats_max[idx] > 1.0:
s_max = idx
break
assert tokens_len < (s_max - s_min) / 2
if tokens_len:
feature_token_ratio_min = min(feature_token_ratio_min, (s_max - s_min) / tokens_len)
vowel_count = count_vowels(text)
word_count = count_words(text)
vowel_count_total += vowel_count
word_count_total += word_count
pad4 = features_len / time_factor - tokens_len - word_count * 2 - vowel_count
if pad4 <= 0:
pad4 = features_len / time_factor - tokens_len
if pad4 <= 0:
print "skipping %s %s" % (uttid, text)
if pad4 > 0:
pad_min4 = min(pad_min4, pad4)
pad_max4 = max(pad_max4, pad4)
pad_sum4 += pad4
example = tf.train.Example(features=tf.train.Features(feature={
'features_len': tf.train.Feature(int64_list=tf.train.Int64List(value=[feats.shape[0]])),
'features': tf.train.Feature(float_list=tf.train.FloatList(value=feats.flatten('C').tolist())),
'tokens': tf.train.Feature(int64_list=tf.train.Int64List(value=tokens)),
'uttid': tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(uttid)])),
's_min': tf.train.Feature(int64_list=tf.train.Int64List(value=[s_min])),
's_max': tf.train.Feature(int64_list=tf.train.Int64List(value=[s_max])),
'text': tf.train.Feature(bytes_list=tf.train.BytesList(value=[text]))}))
tf_record_writer.write(example.SerializeToString())
utterance_count += 1
print("utterance_count: %d" % utterance_count)
print("features_width: %d" % features_width)
print("features_len_avg: %f" % (float(features_len_total) / float(utterance_count)))
print("features_len_total: %d" % features_len_total)
print("features_len_max: %d" % features_len_max)
print("tokens_len_total: %d" % tokens_len_total)
print("tokens_len_avg: %f" % (float(tokens_len_total) / float(utterance_count)))
print("tokens_len_max: %d" % tokens_len_max)
print("feature_token_ratio_min: %f" % feature_token_ratio_min)
print("vowel_count_total: %d" % vowel_count_total)
print("vowel_ratio: %f" % (float(vowel_count_total) / float(features_len_total / time_factor)))
print("word_count_total: %d" % word_count_total)
print("word_ratio: %f" % (float(word_count_total) / float(features_len_total / time_factor)))
print("pad_min4: %d" % pad_min4)
print("pad_max4: %d" % pad_max4)
print("pad_sum4: %d" % pad_sum4)
print("pad_avg4: %f" % (float(pad_sum4) / float(utterance_count)))
print("pad_ratio: %f" % (float(pad_sum4) / float(features_len_total / time_factor)))
if __name__ == '__main__':
main()
|
apache-2.0
|
TritonSailor/btce-api
|
samples/watch.py
|
17
|
2213
|
#!/usr/bin/python
import sys
import time
import wx
import matplotlib
matplotlib.use("WXAgg")
matplotlib.rcParams['toolbar'] = 'None'
import matplotlib.pyplot as plt
import pylab
import btceapi
class Chart(object):
def __init__(self, symbol):
self.symbol = symbol
self.base = symbol.split("_")[0].upper()
self.alt = symbol.split("_")[1].upper()
self.ticks = btceapi.getTradeHistory(self.symbol)
self.last_tid = max([t.tid for t in self.ticks])
self.fig = plt.figure()
self.axes = self.fig.add_subplot(111)
self.bid_line, = self.axes.plot(*zip(*self.bid), \
linestyle='None', marker='o', color='red')
self.ask_line, = self.axes.plot(*zip(*self.ask), \
linestyle='None', marker='o', color='green')
self.fig.canvas.draw()
self.timer_id = wx.NewId()
self.actor = self.fig.canvas.manager.frame
self.timer = wx.Timer(self.actor, id=self.timer_id)
self.timer.Start(10000) # update every 10 seconds
wx.EVT_TIMER(self.actor, self.timer_id, self.update)
pylab.show()
@property
def bid(self):
return [(t.date, t.price) for t in self.ticks if t.trade_type == u'bid']
@property
def ask(self):
return [(t.date, t.price) for t in self.ticks if t.trade_type == u'ask']
def update(self, event):
ticks = btceapi.getTradeHistory(self.symbol)
self.ticks += [t for t in ticks if t.tid > self.last_tid]
for t in ticks:
if t.tid > self.last_tid:
print "%s: %s %f at %s %f" % \
(t.trade_type, self.base, t.amount, self.alt, t.price)
self.last_tid = max([t.tid for t in ticks])
x, y = zip(*self.bid)
self.bid_line.set_xdata(x)
self.bid_line.set_ydata(y)
x, y = zip(*self.ask)
self.ask_line.set_xdata(x)
self.ask_line.set_ydata(y)
pylab.gca().relim()
pylab.gca().autoscale_view()
self.fig.canvas.draw()
if __name__ == "__main__":
symbol = "btc_usd"
try:
symbol = sys.argv[1]
except IndexError:
pass
chart = Chart(symbol)
|
mit
|
DartML/SteinGAN
|
mnist/load.py
|
1
|
1301
|
import sys
sys.path.append('..')
import numpy as np
import os
from time import time
from collections import Counter
import random
from matplotlib import pyplot as plt
import theano
from lib.data_utils import shuffle
from lib.config import data_dir
import scipy.io as sio
def mnist():
fd = open(os.path.join(data_dir, 'train-images-idx3-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
trX = loaded[16:].reshape((60000, 28 * 28)).astype(float)
fd = open(os.path.join(data_dir, 'train-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
trY = loaded[8:].reshape((60000))
fd = open(os.path.join(data_dir, 't10k-images-idx3-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
teX = loaded[16:].reshape((10000, 28 * 28)).astype(float)
fd = open(os.path.join(data_dir, 't10k-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
teY = loaded[8:].reshape((10000))
trY = np.asarray(trY)
teY = np.asarray(teY)
return trX, teX, trY, teY
def mnist_with_valid_set():
trX, teX, trY, teY = mnist()
trX, trY = shuffle(trX, trY)
vaX = trX[50000:]
vaY = trY[50000:]
trX = trX[:50000]
trY = trY[:50000]
return trX, vaX, teX, trY, vaY, teY
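# Illustrative usage sketch (assumes the raw MNIST idx files exist under
# data_dir as opened above):
#   trX, vaX, teX, trY, vaY, teY = mnist_with_valid_set()
#   # trX: (50000, 784), vaX: (10000, 784), teX: (10000, 784)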
|
mit
|
jmmease/pandas
|
pandas/tests/indexes/period/test_indexing.py
|
4
|
11822
|
from datetime import datetime
import pytest
import numpy as np
import pandas as pd
from pandas.util import testing as tm
from pandas.compat import lrange
from pandas._libs import tslib
from pandas import (PeriodIndex, Series, DatetimeIndex,
period_range, Period)
class TestGetItem(object):
def setup_method(self, method):
pass
def test_getitem(self):
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
for idx in [idx1]:
result = idx[0]
assert result == pd.Period('2011-01-01', freq='D')
result = idx[-1]
assert result == pd.Period('2011-01-31', freq='D')
result = idx[0:5]
expected = pd.period_range('2011-01-01', '2011-01-05', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx[0:10:2]
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',
'2011-01-05',
'2011-01-07', '2011-01-09'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx[-20:-5:3]
expected = pd.PeriodIndex(['2011-01-12', '2011-01-15',
'2011-01-18',
'2011-01-21', '2011-01-24'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx[4::-1]
expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
def test_getitem_index(self):
idx = period_range('2007-01', periods=10, freq='M', name='x')
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(['2007-02', '2007-04', '2007-06'],
freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False,
True, True, False, False, False]]
exp = pd.PeriodIndex(['2007-01', '2007-02', '2007-06', '2007-07'],
freq='M', name='x')
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range('2007-01', periods=50, freq='M')
ts = Series(np.random.randn(len(rng)), rng)
pytest.raises(KeyError, ts.__getitem__, '2006')
result = ts['2008']
assert (result.index.year == 2008).all()
result = ts['2008':'2009']
assert len(result) == 24
result = ts['2008-1':'2009-12']
assert len(result) == 24
result = ts['2008Q1':'2009Q4']
assert len(result) == 24
result = ts[:'2009']
assert len(result) == 36
result = ts['2009':]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
tm.assert_raises_regex(KeyError,
"left slice bound for non-unique "
"label: '2008'",
ts.__getitem__, slice('2008', '2009'))
def test_getitem_datetime(self):
rng = period_range(start='2012-01-01', periods=10, freq='W-MON')
ts = Series(lrange(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')
assert idx[0] == pd.Period('2011-01', freq='M')
assert idx[1] is tslib.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert (s[pd.Period('2011-01', freq='M')] ==
pd.Period('2011-01', freq='M'))
assert s[pd.NaT] is tslib.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start='2012-01-01', periods=10, freq='D')
ts = Series(lrange(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period('2012-01-02', freq='D')]], exp)
def test_getitem_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S',
periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01/01 10:00'], s[3600:3660])
tm.assert_series_equal(s['2013/01/01 9H'], s[:3600])
for d in ['2013/01/01', '2013/01', '2013']:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH 6716
        # Confirm DatetimeIndex and PeriodIndex work identically
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01'], s[0:31])
tm.assert_series_equal(s['2013/02'], s[31:59])
tm.assert_series_equal(s['2014'], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with pytest.raises(KeyError):
s[v]
class TestIndexing(object):
def test_get_loc_msg(self):
idx = period_range('2000-1-1', freq='A', periods=10)
bad_period = Period('2012', 'A')
pytest.raises(KeyError, idx.get_loc, bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03'])
pidx = PeriodIndex(['2011-01-01', 'NaT', '2011-01-03'], freq='M')
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(pd.NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float('nan')) == 1
assert idx.get_loc(np.nan) == 1
def test_take(self):
# GH 10295
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Period('2011-01-01', freq='D')
result = idx.take([5])
assert result == pd.Period('2011-01-06', freq='D')
result = idx.take([0, 1, 2])
expected = pd.period_range('2011-01-01', '2011-01-03', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == 'D'
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',
'2011-01-05'], freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(['2011-01-08', '2011-01-05',
'2011-01-02'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx.take([3, 2, 5])
expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx.take([-3, 2, 5])
expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
def test_take_misc(self):
index = PeriodIndex(start='1/1/10', end='12/31/12', freq='D',
name='idx')
expected = PeriodIndex([datetime(2010, 1, 6), datetime(2010, 1, 7),
datetime(2010, 1, 9), datetime(2010, 1, 13)],
freq='D', name='idx')
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, PeriodIndex)
assert taken.freq == index.freq
assert taken.name == expected.name
def test_take_fill_value(self):
# GH 12631
idx = pd.PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
name='xxx', freq='D')
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', 'NaT'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
|
bsd-3-clause
|
Adai0808/scikit-learn
|
sklearn/metrics/tests/test_common.py
|
83
|
41144
|
from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curves are currently not covered by the invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layouts.
#
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": r2_score,
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_MULTICLASS = [
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
# Those metrics don't support multiclass outputs
"average_precision_score", "weighted_average_precision_score",
"micro_average_precision_score", "macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_average_precision_score",
"roc_auc_score", "micro_roc_auc", "weighted_roc_auc",
"macro_roc_auc", "samples_roc_auc",
"coverage_error",
"brier_score_loss",
"label_ranking_loss",
]
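# Illustrative sketch (not part of the original test module): the dictionaries
# and lists above allow name-driven, generic checks, e.g. evaluating every
# classification metric that is defined for plain binary targets. The helper
# below is a hypothetical example and is not collected as a test.
def _example_call_metrics_by_name(y_true, y_pred):
    results = {}
    for name, metric in CLASSIFICATION_METRICS.items():
        if name in METRIC_UNDEFINED_MULTICLASS:
            continue
        results[name] = metric(y_true, y_pred)
    return results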
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that have a labels argument as well as a
# decision function argument, e.g. hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score",
"weighted_recall_score",
"micro_f0.5_score", "micro_f2_score", "micro_precision_score",
"micro_recall_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"cohen_kappa_score",
"confusion_matrix",
"hamming_loss",
"matthews_corrcoef_score",
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(NOT_SYMMETRIC_METRICS,
THRESHOLDED_METRICS,
METRIC_UNDEFINED_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
    # Ensure that classification metrics give the same results with string
    # labels as with numeric labels
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
"brier_score_loss"):
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
            # TODO: these metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if name in METRIC_UNDEFINED_MULTICLASS or name in THRESHOLDED_METRICS:
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 += [0]*n_classes
y2 += [0]*n_classes
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
def test_raise_value_error_multilabel_sequences():
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclass_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
# for both random_state 0 and 1, y_true and y_pred has at least one
# unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
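        # With no positive labels every weight is zero, so no weighted average
        # can be formed; the expected score in that degenerate case is 0.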
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg="Weighted scores for array and list sample_weight input are "
"not equal (%f != %f) for %s" % (
weighted_score, weighted_score_list, name))
    # check that integer weighting is equivalent to repeating the samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
    # Check that an error is raised if sample_weight.shape[0] != y_true.shape[0]
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# binary output
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multilabel indicator
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
            # skip 2d (multilabel) inputs for metrics that do not support them;
            # .shape[1] would raise an IndexError on the 1d multiclass arrays
            if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
|
bsd-3-clause
|
Robbie1977/3DwarpScoring
|
slicescore.py
|
1
|
3477
|
import nrrd
import sys, warnings
from numpy import int,round,linspace, newaxis, shape, array, uint32, uint8, max, sqrt, abs, mean, dtype, int32, add, divide, subtract, sum, square, multiply, asarray, squeeze, float128, average, ones, fmin
#from matplotlib.pyplot import imshow, figure, show, colorbar
#import matplotlib.cm as cm
def zsampleslice(data):
"""Returns four sample Z slices through a 3d image array."""
data = array(data,ndmin=3)
l=shape(data)[2]
a=data[:,:,0]
s=int(l/3)
b=data[:,:,(s)]
c=data[:,:,(-s)]
d=data[:,:,(l-1)]
return array([a,b,c,d])
def ysampleslice(data):
"""Returns four sample Z slices through a 3d image array."""
data = array(data,ndmin=3)
l=shape(data)[1]
a=data[:,0,:]
s=int(l/4)
b=data[:,s,:]
c=data[:,-s,:]
d=data[:,l-1,:]
return array([a,b,c,d])
def xsampleslice(data):
"""Returns four sample Z slices through a 3d image array."""
data = array(data,ndmin=3)
l=shape(data)[0]
a=data[0,:,:]
s=int(l/4)
b=data[s,:,:]
c=data[-s,:,:]
d=data[l-1,:,:]
return array([a,b,c,d])
def RMSdiff(data1,data2):
"""Returns the RMS difference between two images."""
return sqrt(mean(abs(data1-(data2+0.0))**2.0))
def OverlapCoeff(data1,data2):
"""Returns the Overlap Coefficent between two images."""
Nd1 = squeeze(asarray(data1,dtype=float128))
Nd2 = squeeze(asarray(data2,dtype=float128))
R = sum(multiply(Nd1,Nd2))/sqrt(multiply(sum(square(Nd1)),sum(square(Nd2))))
print R
return R
def minOverlapCoeff(data1,data2):
"""Returns the min Overlap Coefficent between image slices."""
R=[]
print shape(data1)
for i in range(0,min(shape(data1))):
Nd1 = squeeze(asarray(data1[i],dtype=float128))
if sum(Nd1) < 1: Nd1[0,0] = 1.0
print shape(Nd1)
print sum(Nd1)
Nd2 = squeeze(asarray(data2[i],dtype=float128))
if sum(Nd2) < 1: Nd2[0,0] = 1.0
print shape(Nd2)
print sum(Nd2)
if (sum(Nd1) + sum(Nd2)) > 0:
R.append(sum(multiply(Nd1,Nd2))/sqrt(multiply(sum(square(Nd1)),sum(square(Nd2)))))
else:
            print 'Note: both slices are blank; treating them as identical'
R.append(1.0)
print R
    # np.fmin is an elementwise binary function and cannot reduce a list;
    # the builtin min gives the smallest per-slice score, as the docstring says
    return min(R)
def avgOverlapCoeff(data1,data2):
"""Returns the min Overlap Coefficent between image slices."""
R=[]
# print shape(data1)
weights=ones(min(shape(data1)),dtype=float)
weights[0]=0.1
weights[-1]=0.1
for i in range(0,min(shape(data1))):
Nd1 = squeeze(asarray(data1[i],dtype=float128))
if sum(Nd1) < 1: Nd1[0,0] = 1.0
# print shape(Nd1)
# print sum(Nd1)
Nd2 = squeeze(asarray(data2[i],dtype=float128))
if sum(Nd2) < 1: Nd2[0,0] = 1.0
# print shape(Nd2)
# print sum(Nd2)
if (sum(Nd1) + sum(Nd2)) > 0:
R.append(sum(multiply(Nd1,Nd2))/sqrt(multiply(sum(square(Nd1)),sum(square(Nd2)))))
else:
            print 'Note: both slices are blank; treating them as identical'
R.append(1.0)
print R
print weights
print average(R, weights=weights)
return average(R, weights=weights)
def symTest(function,data):
"""Applies the given function to the diagonal slices output from xslice. Can be used to assess the symetry of a 3D image using a comparison function such as OverlapCoeff."""
if data.ndim < 3:
warnings.warn("must be used with data output from xslice", SyntaxWarning)
return False
else:
return function(data[0],data[1])
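# Illustrative usage sketch (not part of the original script; the filenames are
# hypothetical): score how well a warped image overlaps a template, slice-wise.
# data1, _ = nrrd.read('warped.nrrd')
# data2, _ = nrrd.read('template.nrrd')
# print avgOverlapCoeff(zsampleslice(data1), zsampleslice(data2))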
|
mit
|
matthewfranglen/spark
|
python/pyspark/sql/tests/test_arrow.py
|
6
|
21045
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import threading
import time
import unittest
import warnings
from pyspark import SparkContext, SparkConf
from pyspark.sql import Row, SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import *
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
from pyspark.util import _exception_message
if have_pandas:
import pandas as pd
from pandas.util.testing import assert_frame_equal
if have_pyarrow:
import pyarrow as pa
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class ArrowTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
from datetime import date, datetime
from decimal import Decimal
super(ArrowTests, cls).setUpClass()
cls.warnings_lock = threading.Lock()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.spark.conf.set("spark.sql.session.timeZone", tz)
        # Check that the legacy Arrow conf keys fall back to the new '.pyspark.' keys
cls.spark.conf.set("spark.sql.execution.arrow.enabled", "false")
assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "false"
cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "true"
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "true")
assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "true"
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "false"
        # Enable Arrow optimization in these tests.
cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
# Disable fallback by default to easily detect the failures.
cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True),
StructField("6_decimal_t", DecimalType(38, 18), True),
StructField("7_date_t", DateType(), True),
StructField("8_timestamp_t", TimestampType(), True),
StructField("9_binary_t", BinaryType(), True)])
cls.data = [(u"a", 1, 10, 0.2, 2.0, Decimal("2.0"),
date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1), bytearray(b"a")),
(u"b", 2, 20, 0.4, 4.0, Decimal("4.0"),
date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2), bytearray(b"bb")),
(u"c", 3, 30, 0.8, 6.0, Decimal("6.0"),
date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3), bytearray(b"ccc")),
(u"d", 4, 40, 1.0, 8.0, Decimal("8.0"),
date(2262, 4, 12), datetime(2262, 3, 3, 3, 3, 3), bytearray(b"dddd"))]
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
super(ArrowTests, cls).tearDownClass()
def create_pandas_data_frame(self):
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
return pd.DataFrame(data=data_dict)
def test_toPandas_fallback_enabled(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}):
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([({u'a': 1},)], schema=schema)
with QuietTest(self.sc):
with self.warnings_lock:
with warnings.catch_warnings(record=True) as warns:
# we want the warnings to appear even if this test is run from a subclass
warnings.simplefilter("always")
pdf = df.toPandas()
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
assert_frame_equal(pdf, pd.DataFrame({u'map': [{u'a': 1}]}))
def test_toPandas_fallback_disabled(self):
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.warnings_lock:
with self.assertRaisesRegexp(Exception, 'Unsupported type'):
df.toPandas()
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def _toPandas_arrow_toggle(self, df):
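        # Returns (pdf, pdf_arrow): the first conversion runs with Arrow disabled
        # via sql_conf, the second with the class-level Arrow-enabled config.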
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
pdf = df.toPandas()
pdf_arrow = df.toPandas()
return pdf, pdf_arrow
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
expected = self.create_pandas_data_frame()
assert_frame_equal(expected, pdf)
assert_frame_equal(expected, pdf_arrow)
def test_toPandas_respect_session_timezone(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
timezone = "America/Los_Angeles"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df)
assert_frame_equal(pdf_arrow_la, pdf_la)
timezone = "America/New_York"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df)
assert_frame_equal(pdf_arrow_ny, pdf_ny)
self.assertFalse(pdf_ny.equals(pdf_la))
from pyspark.sql.pandas.types import _check_series_convert_timestamps_local_tz
pdf_la_corrected = pdf_la.copy()
for field in self.schema:
if isinstance(field.dataType, TimestampType):
pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
pdf_la_corrected[field.name], timezone)
assert_frame_equal(pdf_ny, pdf_la_corrected)
def test_pandas_round_trip(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
assert_frame_equal(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
def test_no_partition_frame(self):
schema = StructType([StructField("field1", StringType(), True)])
df = self.spark.createDataFrame(self.sc.emptyRDD(), schema)
pdf = df.toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "field1")
self.assertTrue(pdf.empty)
def test_propagates_spark_exception(self):
df = self.spark.range(3).toDF("i")
def raise_exception():
raise Exception("My error")
exception_udf = udf(raise_exception, IntegerType())
df = df.withColumn("error", exception_udf())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'My error'):
df.toPandas()
def _createDataFrame_toggle(self, pdf, schema=None):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
df_no_arrow = self.spark.createDataFrame(pdf, schema=schema)
df_arrow = self.spark.createDataFrame(pdf, schema=schema)
return df_no_arrow, df_arrow
def test_createDataFrame_toggle(self):
pdf = self.create_pandas_data_frame()
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
self.assertEquals(df_no_arrow.collect(), df_arrow.collect())
def test_createDataFrame_respect_session_timezone(self):
from datetime import timedelta
pdf = self.create_pandas_data_frame()
timezone = "America/Los_Angeles"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema)
result_la = df_no_arrow_la.collect()
result_arrow_la = df_arrow_la.collect()
self.assertEqual(result_la, result_arrow_la)
timezone = "America/New_York"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema)
result_ny = df_no_arrow_ny.collect()
result_arrow_ny = df_arrow_ny.collect()
self.assertEqual(result_ny, result_arrow_ny)
self.assertNotEqual(result_ny, result_la)
# Correct result_la by adjusting 3 hours difference between Los Angeles and New York
result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v
for k, v in row.asDict().items()})
for row in result_la]
self.assertEqual(result_ny, result_la_corrected)
def test_createDataFrame_with_schema(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(pdf, schema=self.schema)
self.assertEquals(self.schema, df.schema)
pdf_arrow = df.toPandas()
assert_frame_equal(pdf_arrow, pdf)
def test_createDataFrame_with_incorrect_schema(self):
pdf = self.create_pandas_data_frame()
fields = list(self.schema)
fields[0], fields[1] = fields[1], fields[0] # swap str with int
wrong_schema = StructType(fields)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "integer.*required"):
self.spark.createDataFrame(pdf, schema=wrong_schema)
def test_createDataFrame_with_names(self):
pdf = self.create_pandas_data_frame()
new_names = list(map(str, range(len(self.schema.fieldNames()))))
# Test that schema as a list of column names gets applied
df = self.spark.createDataFrame(pdf, schema=list(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
# Test that schema as tuple of column names gets applied
df = self.spark.createDataFrame(pdf, schema=tuple(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
def test_createDataFrame_column_name_encoding(self):
pdf = pd.DataFrame({u'a': [1]})
columns = self.spark.createDataFrame(pdf).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'a')
columns = self.spark.createDataFrame(pdf, [u'b']).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'b')
def test_createDataFrame_with_single_data_type(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, ".*IntegerType.*not supported.*"):
self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int")
def test_createDataFrame_does_not_modify_input(self):
        # Some series get converted for Spark to consume; this makes sure the input is unchanged
pdf = self.create_pandas_data_frame()
# Use a nanosecond value to make sure it is not truncated
pdf.iloc[0, 7] = pd.Timestamp(1)
        # Integers with nulls will get NaNs filled with 0 and will be cast
pdf.iloc[1, 1] = None
pdf_copy = pdf.copy(deep=True)
self.spark.createDataFrame(pdf, schema=self.schema)
self.assertTrue(pdf.equals(pdf_copy))
def test_schema_conversion_roundtrip(self):
from pyspark.sql.pandas.types import from_arrow_schema, to_arrow_schema
arrow_schema = to_arrow_schema(self.schema)
schema_rt = from_arrow_schema(arrow_schema)
self.assertEquals(self.schema, schema_rt)
def test_createDataFrame_with_array_type(self):
pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]})
df, df_arrow = self._createDataFrame_toggle(pdf)
result = df.collect()
result_arrow = df_arrow.collect()
expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_toPandas_with_array_type(self):
expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])]
array_schema = StructType([StructField("a", ArrayType(IntegerType())),
StructField("b", ArrayType(StringType()))])
df = self.spark.createDataFrame(expected, schema=array_schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_createDataFrame_with_int_col_names(self):
import numpy as np
pdf = pd.DataFrame(np.random.rand(4, 2))
df, df_arrow = self._createDataFrame_toggle(pdf)
pdf_col_names = [str(c) for c in pdf.columns]
self.assertEqual(pdf_col_names, df.columns)
self.assertEqual(pdf_col_names, df_arrow.columns)
def test_createDataFrame_fallback_enabled(self):
with QuietTest(self.sc):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}):
with warnings.catch_warnings(record=True) as warns:
# we want the warnings to appear even if this test is run from a subclass
warnings.simplefilter("always")
df = self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertEqual(df.collect(), [Row(a={u'a': 1})])
def test_createDataFrame_fallback_disabled(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type'):
self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# Regression test for SPARK-23314
def test_timestamp_dst(self):
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
pdf = pd.DataFrame({'time': dt})
df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
df_from_pandas = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df_from_python.toPandas())
assert_frame_equal(pdf, df_from_pandas.toPandas())
# Regression test for SPARK-28003
def test_timestamp_nat(self):
dt = [pd.NaT, pd.Timestamp('2019-06-11'), None] * 100
pdf = pd.DataFrame({'time': dt})
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf)
assert_frame_equal(pdf, df_no_arrow.toPandas())
assert_frame_equal(pdf, df_arrow.toPandas())
def test_toPandas_batch_order(self):
def delay_first_part(partition_index, iterator):
if partition_index == 0:
time.sleep(0.1)
return iterator
# Collects Arrow RecordBatches out of order in driver JVM then re-orders in Python
def run_test(num_records, num_parts, max_records, use_delay=False):
df = self.spark.range(num_records, numPartitions=num_parts).toDF("a")
if use_delay:
df = df.rdd.mapPartitionsWithIndex(delay_first_part).toDF()
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": max_records}):
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
assert_frame_equal(pdf, pdf_arrow)
cases = [
(1024, 512, 2), # Use large num partitions for more likely collecting out of order
(64, 8, 2, True), # Use delay in first partition to force collecting out of order
(64, 64, 1), # Test single batch per partition
(64, 1, 64), # Test single partition, single batch
(64, 1, 8), # Test single partition, multiple batches
(30, 7, 2), # Test different sized partitions
]
for case in cases:
run_test(*case)
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class MaxResultArrowTests(unittest.TestCase):
# These tests are separate as 'spark.driver.maxResultSize' configuration
# is a static configuration to Spark context.
@classmethod
def setUpClass(cls):
cls.spark = SparkSession(SparkContext(
'local[4]', cls.__name__, conf=SparkConf().set("spark.driver.maxResultSize", "10k")))
# Explicitly enable Arrow and disable fallback.
cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def test_exception_by_max_results(self):
with self.assertRaisesRegexp(Exception, "is bigger than"):
self.spark.range(0, 10000, 1, 100).toPandas()
class EncryptionArrowTests(ArrowTests):
@classmethod
def conf(cls):
return super(EncryptionArrowTests, cls).conf().set("spark.io.encryption.enabled", "true")
if __name__ == "__main__":
from pyspark.sql.tests.test_arrow import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
mit
|
frankiecrouch/Bayesian-Decision-Trees
|
Scripts/Serial-BCART/SerialBCART.py
|
1
|
6179
|
from BayesianTree import Node, Tree, acceptance
import numpy as np
import pandas as pd
import copy
import time
start_time = time.time()
#****************************************************************************************************
# read in the data
#****************************************************************************************************
train_data = pd.read_csv("/Users/Frankie/Documents/Dissertation/Data/pancreatic/pancreatic_1_train.csv")
y_train = train_data['label'].as_matrix()
X_train = train_data.drop('label', axis=1).as_matrix()
test_data = pd.read_csv("/Users/Frankie/Documents/Dissertation/Data/pancreatic/pancreatic_1_test.csv")
y_test = test_data['label'].as_matrix()
X_test = test_data.drop('label', axis=1).as_matrix()
end_data_load = time.time()
#****************************************************************************************************
# set parameters: no. of iterations, no. of repeats, alpha and beta
#****************************************************************************************************
iterations = 5000
repeat = 500
alpha = 0.95
beta = 1.5
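# alpha and beta parameterise the tree split prior; in the standard BCART
# formulation (Chipman, George & McCulloch, 1998) a node at depth d splits with
# probability alpha * (1 + d)**(-beta). It is assumed here that the imported
# Tree class follows that convention.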
#****************************************************************************************************
# create arrays to store results:
# - AUC on the training data
# - AUC on the testing data
# - runtime
#****************************************************************************************************
results_auc_train = np.zeros((repeat , iterations), dtype = np.object)
results_auc_test = np.zeros((repeat , iterations), dtype = np.object)
results_runtime = np.zeros((repeat , iterations), dtype = np.float)
#****************************************************************************************************
# create the starting tree with just a root node
#****************************************************************************************************
starting_indices = np.arange(X_train.shape[0])
rootNode = Node(data_indices = starting_indices)
tree = Tree(root_node=rootNode, alpha = alpha, beta = beta, X=X_train, Y=y_train)
tree.calc_likelihood()
#****************************************************************************************************
# start the iterations
#****************************************************************************************************
for j in range(0,repeat):
    # create a copy of the root-node tree at the beginning of each MCMC chain
current_tree = copy.deepcopy(tree)
for i in range (0,iterations):
        # print progress as the script runs (comment out to silence)
        print "repeat " + str(j) + " iteration " + str(i)
# start timer
start = time.time()
# generate the candidate tree
candidate_tree = copy.deepcopy(current_tree)
        # GROW, PRUNE, CHANGE OR SWAP
random_proposal = np.random.randint(4)
if random_proposal == 0:
candidate_tree.grow()
elif random_proposal == 1:
candidate_tree.prune()
elif random_proposal == 2:
candidate_tree.change()
elif random_proposal == 3:
candidate_tree.swap()
# update the likelihood of the candidate tree
candidate_tree.calc_likelihood()
# calc acceptance
acpt = acceptance(current_tree, candidate_tree)
# generate random number
random_acceptance = np.random.uniform(0,1)
# update tree if accepting
if random_acceptance < acpt:
current_tree = copy.deepcopy(candidate_tree)
# uncomment to print the tree
# filename = "tree_" + str(i)
# current_tree.printTree(filename = filename)
#end timer
stop = time.time()
# record the results
auc_train = current_tree.train_auc()
auc_test = current_tree.test_auc(X_test, y_test)
results_auc_train[j][i] = auc_train
results_auc_test[j][i] = auc_test
results_runtime[j][i] = (stop-start)
end_total = time.time()
#****************************************************************************************************
# find the best tree from each chain by choosing the tree with the max AUC
#****************************************************************************************************
arg_max_auc = np.argmax(results_auc_train, axis = 1)
all_results = []
for i in range(0,repeat):
all_results.append(results_auc_test[i][arg_max_auc[i]])
# calculate the average AUC and stdv
mean_result = np.average(np.asarray(all_results))
std = np.std(np.asarray(all_results))
#****************************************************************************************************
# export results
#****************************************************************************************************
# raw data
np.savetxt("auc_test.txt", results_auc_test, delimiter=',')
np.savetxt("auc_train.txt", results_auc_train, delimiter=',')
np.savetxt("runtime.txt", results_runtime, delimiter=',')
# summary of the runtime results
total_iterations_time = (np.sum(results_runtime))/60
min_chain = (np.min(np.sum(results_runtime, axis=1)))/60
max_chain = (np.max(np.sum(results_runtime, axis=1)))/60
ave_chain = (np.mean(np.sum(results_runtime, axis=1)))/60
total_runtime = (end_total - start_time)/60
load_data_time = (end_data_load - start_time)/60
with open('time_results.txt', 'w' ) as f:
f.write("Total runtime was %f minutes" % total_runtime)
f.write(", which is %f hours \n" % (total_runtime/60))
f.write("The data load took %f minutes \n" % load_data_time)
f.write("The total time spent doing the MCMC chains was %f minutes \n" % total_iterations_time)
f.write("The min, max and average MCMC chain of length %d was: %f, %f, %f minutes" % (iterations, min_chain, max_chain,ave_chain))
# summary of the prediction results
with open(('results_summary.txt'), 'w') as f:
f.write('beta, AUC, stdv \n')
f.write(str(beta) + "," +
str(mean_result) + ","+
str(std))
|
mit
|
UDST/osmnet
|
osmnet/tests/test_load.py
|
1
|
9414
|
import numpy.testing as npt
import pandas.util.testing as pdt
import pytest
import shapely.geometry as geometry
import osmnet.load as load
@pytest.fixture(scope='module')
def bbox1():
# Intersection of Telegraph and Haste in Berkeley
# Sample query: http://overpass-turbo.eu/s/6AK
return 37.8659303546, -122.2588003879, 37.8661598571, -122.2585062512
@pytest.fixture(scope='module')
def bbox2():
# Telegraph Channing to Durant in Berkeley
# Sample query: http://overpass-turbo.eu/s/6B0
return 37.8668405874, -122.2590948685, 37.8679028054, -122.2586363885
@pytest.fixture(scope='module')
def bbox3():
# West Berkeley including highway 80, frontage roads, and foot paths
# Sample query: http://overpass-turbo.eu/s/6VE
return (
37.85225504880375, -122.30295896530151,
        37.85776128099243, -122.2954273223877)
@pytest.fixture
def bbox4():
return (-122.2762870789, 37.8211879615,
-122.2701716423, 37.8241329692)
@pytest.fixture
def bbox5():
return (-122.2965574674, 37.8038112007,
-122.2935963086, 37.8056400922)
@pytest.fixture
def simple_polygon():
polygon = geometry.Polygon([[0, 0], [1, 0], [1, 1], [0, 1]])
return polygon
@pytest.fixture(scope='module')
def query_data1(bbox1):
lat_min, lng_max, lat_max, lng_min = bbox1
query_template = '[out:json][timeout:{timeout}]{maxsize};(way["highway"]' \
'{filters}({lat_min:.8f},{lng_max:.8f},{lat_max:.8f},' \
'{lng_min:.8f});>;);out;'
query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
lng_min=lng_min, lng_max=lng_max,
filters=load.osm_filter('walk'),
timeout=180, maxsize='')
return load.overpass_request(data={'data': query_str})
@pytest.fixture(scope='module')
def query_data2(bbox2):
lat_min, lng_max, lat_max, lng_min = bbox2
query_template = '[out:json][timeout:{timeout}]{maxsize};(way["highway"]' \
'{filters}({lat_min:.8f},{lng_max:.8f},{lat_max:.8f},' \
'{lng_min:.8f});>;);out;'
query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
lng_min=lng_min, lng_max=lng_max,
filters=load.osm_filter('walk'),
timeout=180, maxsize='')
return load.overpass_request(data={'data': query_str})
@pytest.fixture(scope='module')
def dataframes1(query_data1):
return load.parse_network_osm_query(query_data1)
@pytest.fixture(scope='module')
def dataframes2(query_data2):
return load.parse_network_osm_query(query_data2)
def test_make_osm_query(query_data1):
assert isinstance(query_data1, dict)
assert len(query_data1['elements']) == 27
assert len([e for e in query_data1['elements']
if e['type'] == 'node']) == 23
assert len([e for e in query_data1['elements']
if e['type'] == 'way']) == 4
def test_process_node():
test_node = {
'id': 'id',
'lat': 'lat',
'lon': 'lon',
'extra': 'extra'
}
expected = {
'id': 'id',
'lat': 'lat',
'lon': 'lon'
}
assert load.process_node(test_node) == expected
test_node['tags'] = {'highway': 'highway', 'source': 'source'}
expected['highway'] = 'highway'
assert load.process_node(test_node) == expected
def test_process_way():
test_way = {
"type": "way",
"id": 188434143,
"timestamp": "2014-01-04T22:18:14Z",
"version": 2,
"changeset": 19814115,
"user": "dchiles",
"uid": 153669,
"nodes": [
53020977,
53041093,
],
"tags": {
'source': 'source',
"addr:city": "Berkeley",
"highway": "secondary",
"name": "Telegraph Avenue",
}
}
expected_way = {
'id': test_way['id'],
'highway': test_way['tags']['highway'],
'name': test_way['tags']['name']
}
expected_waynodes = [
{'way_id': test_way['id'], 'node_id': test_way['nodes'][0]},
{'way_id': test_way['id'], 'node_id': test_way['nodes'][1]}
]
way, waynodes = load.process_way(test_way)
assert way == expected_way
assert waynodes == expected_waynodes
def test_parse_network_osm_query(dataframes1):
nodes, ways, waynodes = dataframes1
assert len(nodes) == 23
assert len(ways) == 4
assert len(waynodes.index.unique()) == 4
def test_parse_network_osm_query_raises():
query_template = '[out:json][timeout:{timeout}]{maxsize};(way["highway"]' \
'{filters}({lat_min:.8f},{lng_max:.8f},{lat_max:.8f},' \
'{lng_min:.8f});>;);out;'
query_str = query_template.format(lat_max=37.8, lng_min=-122.252,
lat_min=37.8, lng_max=-122.252,
filters=load.osm_filter('walk'),
timeout=180, maxsize='')
data = load.overpass_request(data={'data': query_str})
with pytest.raises(RuntimeError):
load.parse_network_osm_query(data)
def test_overpass_request_raises(bbox5):
lat_min, lng_max, lat_max, lng_min = bbox5
query_template = '[out:json][timeout:{timeout}]{maxsize};(way["highway"]' \
'{filters}({lat_min:.8f},{lng_max:.8f},{lat_max:.8f},' \
'{lng_min:.8f});>;);out;'
query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
lng_min=lng_min, lng_max=lng_max,
filters=load.osm_filter('walk'),
timeout=0, maxsize='')
with pytest.raises(Exception):
load.overpass_request(data={'data': query_str})
def test_get_pause_duration():
error_pause_duration = load.get_pause_duration(recursive_delay=5,
default_duration=10)
assert error_pause_duration >= 0
def test_quadrat_cut_geometry(simple_polygon):
multipolygon = load.quadrat_cut_geometry(geometry=simple_polygon,
quadrat_width=0.5,
min_num=3,
buffer_amount=1e-9)
assert isinstance(multipolygon, geometry.MultiPolygon)
assert len(multipolygon) == 4
def test_ways_in_bbox(bbox1, dataframes1):
lat_min, lng_max, lat_max, lng_min = bbox1
nodes, ways, waynodes = load.ways_in_bbox(lat_min=lat_min, lng_min=lng_min,
lat_max=lat_max, lng_max=lng_max,
network_type='walk')
exp_nodes, exp_ways, exp_waynodes = dataframes1
pdt.assert_frame_equal(nodes, exp_nodes)
pdt.assert_frame_equal(ways, exp_ways)
pdt.assert_frame_equal(waynodes, exp_waynodes)
@pytest.mark.parametrize(
'network_type, noset',
[('walk', {'motorway', 'motorway_link'}),
('drive', {'footway', 'cycleway'})])
def test_ways_in_bbox_walk_network(bbox3, network_type, noset):
lat_min, lng_max, lat_max, lng_min = bbox3
nodes, ways, waynodes = load.ways_in_bbox(lat_min=lat_min, lng_min=lng_min,
lat_max=lat_max, lng_max=lng_max,
network_type=network_type)
for _, way in ways.iterrows():
assert way['highway'] not in noset
def test_intersection_nodes1(dataframes1):
_, _, waynodes = dataframes1
intersections = load.intersection_nodes(waynodes)
assert intersections == {53041093}
def test_intersection_nodes2(dataframes2):
_, _, waynodes = dataframes2
intersections = load.intersection_nodes(waynodes)
assert intersections == {53099275, 53063555}
def test_node_pairs_two_way(dataframes2):
nodes, ways, waynodes = dataframes2
pairs = load.node_pairs(nodes, ways, waynodes)
assert len(pairs) == 1
fn = 53063555
tn = 53099275
pair = pairs.loc[(fn, tn)]
assert pair.from_id == fn
assert pair.to_id == tn
npt.assert_allclose(pair.distance, 101.48279182499789)
def test_node_pairs_one_way(dataframes2):
nodes, ways, waynodes = dataframes2
pairs = load.node_pairs(nodes, ways, waynodes, two_way=False)
assert len(pairs) == 2
n1 = 53063555
n2 = 53099275
for p1, p2 in [(n1, n2), (n2, n1)]:
pair = pairs.loc[(p1, p2)]
assert pair.from_id == p1
assert pair.to_id == p2
npt.assert_allclose(pair.distance, 101.48279182499789)
def test_column_names(bbox4):
nodes, edges = load.network_from_bbox(
bbox=bbox4, network_type='walk', timeout=180, memory=None,
max_query_area_size=50*1000*50*1000
)
col_list = ['x', 'y', 'id']
for col in col_list:
assert col in nodes.columns
col_list = ['distance', 'from', 'to']
for col in col_list:
assert col in edges.columns
def test_custom_query_pass(bbox5):
nodes, edges = load.network_from_bbox(
bbox=bbox5, custom_osm_filter='["highway"="service"]'
)
assert len(nodes) == 24
assert len(edges) == 32
assert edges['highway'].unique() == 'service'
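# Illustrative usage sketch (not part of the test suite): the same kind of call
# exercised by test_column_names above, spelled out with the bbox4 coordinates,
# for running osmnet outside pytest.
# nodes, edges = load.network_from_bbox(
#     bbox=(-122.2762870789, 37.8211879615, -122.2701716423, 37.8241329692),
#     network_type='walk')
# print(nodes.head())
# print(edges.head())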
|
agpl-3.0
|
MetrodataTeam/incubator-airflow
|
tests/contrib/hooks/test_bigquery_hook.py
|
16
|
8098
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow.contrib.hooks import bigquery_hook as hook
from oauth2client.contrib.gce import HttpAccessTokenRefreshError
bq_available = True
try:
hook.BigQueryHook().get_service()
except HttpAccessTokenRefreshError:
bq_available = False
class TestBigQueryDataframeResults(unittest.TestCase):
def setUp(self):
self.instance = hook.BigQueryHook()
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_output_is_dataframe_with_valid_query(self):
import pandas as pd
df = self.instance.get_pandas_df('select 1')
self.assertIsInstance(df, pd.DataFrame)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_invalid_query(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('from `1`')
self.assertIn('pandas_gbq.gbq.GenericGBQException: Reason: invalidQuery',
str(context.exception), "")
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_suceeds_with_explicit_legacy_query(self):
df = self.instance.get_pandas_df('select 1', dialect='legacy')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_suceeds_with_explicit_std_query(self):
df = self.instance.get_pandas_df('select * except(b) from (select 1 a, 2 b)', dialect='standard')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_incompatible_syntax(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('select * except(b) from (select 1 a, 2 b)', dialect='legacy')
self.assertIn('pandas_gbq.gbq.GenericGBQException: Reason: invalidQuery',
str(context.exception), "")
class TestBigQueryTableSplitter(unittest.TestCase):
def test_internal_need_default_project(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('dataset.table', None)
self.assertIn('INTERNAL: No default project is specified',
str(context.exception), "")
def test_split_dataset_table(self):
project, dataset, table = hook._split_tablename('dataset.table',
'project')
self.assertEqual("project", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative:dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_sql_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative.dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_colon_in_project(self):
project, dataset, table = hook._split_tablename('alt1:alt.dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_valid_double_column(self):
project, dataset, table = hook._split_tablename('alt1:alt:dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_invalid_syntax_triple_colon(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt3:dataset.table',
'project')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_triple_dot(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_column_double_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt.dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_colon_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt:dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_dot_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project', 'var_x')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
class TestBigQueryHookSourceFormat(unittest.TestCase):
def test_invalid_source_format(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load("test.test", "test_schema.json", ["test_data.json"], source_format="json")
# since we passed 'json' in, and it's not valid, make sure it's present in the error string.
self.assertIn("JSON", str(context.exception))
class TestBigQueryBaseCursor(unittest.TestCase):
def test_invalid_schema_update_options(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"]
)
self.assertIn("THIS IS NOT VALID", str(context.exception))
def test_invalid_schema_update_and_write_disposition(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY'
)
self.assertIn("schema_update_options is only", str(context.exception))
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
nagyistoce/kaggle-galaxies
|
try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_pysexgen1_dup.py
|
7
|
17744
|
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
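# Optional sanity checks (not in the original script): the comments above require
# CHUNK_SIZE to be a multiple of BATCH_SIZE and VALIDATE_EVERY to divide
# NUM_CHUNKS, so fail fast if either constraint is violated.
assert CHUNK_SIZE % BATCH_SIZE == 0, "CHUNK_SIZE must be a multiple of BATCH_SIZE"
assert NUM_CHUNKS % VALIDATE_EVERY == 0, "VALIDATE_EVERY must divide NUM_CHUNKS"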
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_pysexgen1_dup.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_pysexgen1_dup.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes, processor_class=ra.LoadAndProcessPysexGen1CenteringRescaling)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes,
processor_class=ra.LoadAndProcessFixedPysexGen1CenteringRescaling)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes,
processor_class=ra.LoadAndProcessFixedPysexGen1CenteringRescaling)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes,
processor_class=ra.LoadAndProcessFixedPysexGen1CenteringRescaling)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
    # train without normalisation for the first NUM_CHUNKS_NONORM chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
layers.dump_params(l6, e=e)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
    # make predictions for the test set; don't forget to cut off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for the set, don't forget to cut off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
|
bsd-3-clause
|
fabianp/scikit-learn
|
examples/text/mlcomp_sparse_document_classification.py
|
292
|
4498
|
"""
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded, unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
        print("Percentage of non-zero coefficients: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
|
bsd-3-clause
|
huzq/scikit-learn
|
examples/linear_model/plot_logistic_path.py
|
19
|
2352
|
#!/usr/bin/env python
"""
========================================================
Regularization path of L1-penalized Logistic Regression
========================================================
Train l1-penalized logistic regression models on a binary classification
problem derived from the Iris dataset.
The models are ordered from strongest regularized to least regularized. The 4
coefficients of the models are collected and plotted as a "regularization
path": on the left-hand side of the figure (strong regularizers), all the
coefficients are exactly 0. When regularization gets progressively looser,
coefficients can get non-zero values one after the other.
Here we choose the liblinear solver because it can efficiently optimize for the
Logistic Regression loss with a non-smooth, sparsity inducing l1 penalty.
Also note that we set a low value for the tolerance to make sure that the model
has converged before collecting the coefficients.
We also use warm_start=True, which means that the coefficients of the models are
reused to initialize the next model fit, speeding up the computation of the
full path.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X /= X.max() # Normalize X to speed-up convergence
# #############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 7, 16)
print("Computing regularization path ...")
start = time()
clf = linear_model.LogisticRegression(penalty='l1', solver='liblinear',
tol=1e-6, max_iter=int(1e6),
warm_start=True,
intercept_scaling=10000.)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took %0.3fs" % (time() - start))
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_, marker='o')
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
andrewnc/scikit-learn
|
examples/neighbors/plot_nearest_centroid.py
|
264
|
1804
|
"""
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
    # we create an instance of the NearestCentroid classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
hopehhchen/Droplets
|
Droplets/Droplets.py
|
1
|
52419
|
import sys
import warnings
#
import numpy as np
import scipy
#
from astropy.io import fits
import astropy.wcs as wcs
import astropy.units as u
import astropy.constants as c
import astropy.modeling as modeling
import astropy.visualization.wcsaxes as wcsaxes
#
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.ticker as ticker
from matplotlib import rcParams
#
import pandas as pd
#
from stat_tools import *
from constants import *
from ssk_colors import *
from plot_tools import *
import styles
def centroidMask(mask):
'''
The function used to calculate the centroid of the boolean mask.
Input
------
mask: a 2D-array, either of type boolean, or any type that can be converted
to the boolean.
Output
------
xcent, ycent: (x, y) [axis-1 and axis-0]
'''
# Create the coordinate grids.
mask = mask.astype(bool)
xgrid, ygrid = np.meshgrid(np.arange(mask.shape[1], dtype = float),
np.arange(mask.shape[0], dtype = float))
# Calculate the centroid based on the boolean mask.
xcent = np.average(xgrid[mask],
weights = mask[mask].astype(float))
ycent = np.average(ygrid[mask],
weights = mask[mask].astype(float))
return xcent, ycent
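# Hedged usage sketch (added for illustration; not part of the original module).
# The synthetic mask below is hypothetical and only demonstrates the call signature.
def _example_centroidMask():
    example_mask = np.zeros((10, 10), dtype=bool)
    example_mask[2:5, 4:7] = True  # a 3x3 block of True pixels
    xcent, ycent = centroidMask(example_mask)
    # With this block, xcent == 5.0 (mean of columns 4, 5, 6) and
    # ycent == 3.0 (mean of rows 2, 3, 4).
    return xcent, ycent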
def frameMask(mask):
'''
The function used to determine the frame in which the structure is centered.
This is mainly for plotting.
Input
------
mask: a 2D-array, either of type boolean, or any type that can be converted
to the boolean.
Output
------
    xcorner, ycorner: bottom left (x, y) [axis-1 and axis-0] of the frame
width, height: of the frame
'''
# Create the coordinate grids.
mask = mask.astype(bool)
xgrid, ygrid = np.meshgrid(np.arange(mask.shape[1], dtype = float),
np.arange(mask.shape[0], dtype = float))
# Calculate the extent of the mask.
xmin, xmax = np.min(xgrid[mask]), np.max(xgrid[mask])
ymin, ymax = np.min(ygrid[mask]), np.max(ygrid[mask])
# Calculate the frame based on the extent
xcorner = xmin - .75*(xmax-xmin)
ycorner = ymin - .75*(ymax-ymin)
width = 2.5*(xmax-xmin)
height = 2.5*(ymax-ymin)
return xcorner, ycorner, width, height
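# Hedged sketch (illustrative only): frameMask on the same kind of synthetic mask,
# showing that the returned frame spans 2.5x the mask extent in each direction.
def _example_frameMask():
    example_mask = np.zeros((10, 10), dtype=bool)
    example_mask[2:5, 4:7] = True
    xcorner, ycorner, width, height = frameMask(example_mask)
    # Here xmin, xmax = 4, 6 and ymin, ymax = 2, 4, so
    # xcorner == 4 - 0.75*2 == 2.5, ycorner == 2 - 0.75*2 == 0.5,
    # and width == height == 2.5*2 == 5.0.
    return xcorner, ycorner, width, height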
def fitGradient(mask, Vlsr, eVlsr):
'''
The function used to fit a 1st-degree 2D polynomial to the Vlsr field.
Input
------
    mask: a 2D boolean array (or convertible) selecting the pixels to fit.
    Vlsr: a 2D array of line-of-sight velocities.
    eVlsr: a 2D array of Vlsr uncertainties; the fit is weighted by 1/eVlsr**2.
    Output
    ------
    gradfit: the fitted astropy.modeling 1st-degree 2D polynomial.
    fitter: the astropy.modeling.fitting.LevMarLSQFitter instance used for the fit.
    Vlsr_predicted: a 2D array of the fitted plane evaluated on the full grid.
'''
    # Create the coordinate grid; shift according to the centroid; read Vlsr and eVlsr.
mask = mask.astype(bool)
xcent, ycent = centroidMask(mask)
xgrid, ygrid = np.meshgrid(np.arange(mask.shape[1], dtype = float),
np.arange(mask.shape[0], dtype = float))
xgrid -= xcent
ygrid -= ycent
## Read Vlsr and eVlsr.
zgrid = Vlsr
wgrid = 1./eVlsr**2. ## Weight fitting by the reciprocal of uncertainty squared.
# Fit using `astropy.modeling`.
gradfit = modeling.polynomial.Polynomial2D(1)
fitter = modeling.fitting.LevMarLSQFitter()
gradfit = fitter(gradfit,
xgrid[mask&np.isfinite(Vlsr)],
ygrid[mask&np.isfinite(Vlsr)],
Vlsr[mask&np.isfinite(Vlsr)],
weights = wgrid[mask&np.isfinite(Vlsr)])
# Generate a map of predicted Vlsr.
Vlsr_predicted = gradfit(xgrid, ygrid)
return gradfit, fitter, Vlsr_predicted
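# Hedged sketch (assumption: a purely synthetic, noise-free Vlsr plane) showing how
# fitGradient recovers a linear velocity field; all values here are illustrative only.
def _example_fitGradient():
    yy, xx = np.mgrid[0:20, 0:20].astype(float)
    Vlsr_true = 3.0 + 0.02 * xx - 0.01 * yy   # a plane, in (hypothetical) km/s per pixel
    eVlsr = np.full_like(Vlsr_true, 0.05)     # constant (hypothetical) uncertainty
    mask = np.ones_like(Vlsr_true, dtype=bool)
    gradfit, fitter, Vlsr_predicted = fitGradient(mask, Vlsr_true, eVlsr)
    # gradfit.parameters[1] and [2] should come out close to 0.02 and -0.01
    # (the x- and y-slopes), and Vlsr_predicted should reproduce Vlsr_true
    # up to numerical precision.
    return gradfit, Vlsr_predicted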
def convertAngle(angle):
'''
    The function that converts numpy angles (in degrees) to PA [E of N], in the
    (-180, 180) range.
Input
------
angle: in degrees.
'''
# For angles in the 4th quadrant, unwrap after move the origin to N.
if (angle >= -180.) and (angle <= -90.): ## convert to E of N
angle = angle + 270.
# For others, just move the origin to N.
else:
angle = angle - 90.
return angle
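# Hedged sketch (illustrative values only): what convertAngle does to a few angles
# as returned by np.degrees(np.arctan2(y, x)).
def _example_convertAngle():
    assert convertAngle(90.) == 0.       # +90 deg from the x-axis -> 0 deg E of N
    assert convertAngle(0.) == -90.      # along the x-axis -> -90 deg E of N
    assert convertAngle(-135.) == 135.   # the (-180, -90) branch: -135 + 270 = 135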
def readGradient(gradfit, fitter, header, reg):
'''
The function that converts the `astropy.modeling` object to physical values.
Input
------
    gradfit: the fitted `astropy.modeling` polynomial returned by fitGradient.
    fitter: the fitter returned by fitGradient (used for the parameter covariances).
    header: the FITS header, used to read the pixel scale (CDELT1).
    reg: the region name, used to look up the distance in `distances`.
    Output
    ------
    GradMag: The gradient magnitude, in km/s/pc.
    eGradMag: The uncertainty in the gradient magnitude measurement.
    GradPA: The position angle of the fitted velocity gradient, in degrees
            [E of N].
    eGradPA: The uncertainty in the position angle measurement.
'''
# Calculate the pixel scale (corresponding physical scale at the region distance).
pixscale = np.radians(abs(header['CDELT1']))*distances[reg]
# Convert the gradients to physical units (km/s/pc).
gradx = gradfit.parameters[1]*u.km/u.s/pixscale
grady = gradfit.parameters[2]*u.km/u.s/pixscale
# Calculate the magnitude and the PA based on the converted x- and y-components.
GradMag = (np.sqrt(gradx**2.+grady**2.)).to(u.km/u.s/u.pc).value ## in km/s/pc
GradPA = convertAngle(np.degrees(np.arctan2(grady.value, gradx.value))) ## in degrees
# Estimate the uncertainty from the covariant matrix.
## the raw parameters
x, y = gradfit.parameters[1], gradfit.parameters[2]
## uncertainties in x and y
sigx = np.sqrt(fitter.fit_info['param_cov'][1, 1])
sigy = np.sqrt(fitter.fit_info['param_cov'][2, 2])
## propagation to GradMag
eGradMag = np.sqrt((sigx*x/np.sqrt(x**2.+y**2.))**2.+(sigy*y/np.sqrt(x**2.+y**2.))**2.)
eGradMag *= u.km/u.s/pixscale ## Convert to physical units.
eGradMag = eGradMag.to(u.km/u.s/u.pc).value ## in km/s/pc
## propagation to GradPA
eGradPA = np.sqrt((sigx*(1./(1.+(y/x)**2.))*(-y/x**2.))**2.+\
(sigy*(1./(1.+(y/x)**2.))*(1./x))**2.)
eGradPA = np.degrees(eGradPA) ## Convert to degrees.
return GradMag, eGradMag, GradPA, eGradPA
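# Note on the propagation above (comment added for clarity): with g = sqrt(x^2 + y^2)
# and theta = arctan2(y, x), standard first-order error propagation gives
#   sigma_g     = sqrt( (sigx * x / g)^2 + (sigy * y / g)^2 )
#   sigma_theta = sqrt( (sigx * y / g^2)^2 + (sigy * x / g^2)^2 )   [in radians]
# which is exactly what the eGradMag and eGradPA expressions compute before the
# unit conversions.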
#### below are plotting functions; consider moving to a separate script for organization
# a plotting function
def plotTable1Sigmas(table1, plotNewWeight = True):
# Read values of Tkin and SigmaNH3 from BM and Ladd+94.
SigmaNH3 = table1['SigmaNH3'].values ## in km/s
Tkin = table1['Tkin'].values ## in Kelvin
# SigmaTot from Goodman93 Table1
SigmaTot0 = table1['SigmaTot'].values
# SigmaTot using Tkin and SigmaNH3 from BM and Ladd+94, assuming 2.33 a.m.u.
SigmaTot1 = np.sqrt((SigmaNH3*u.km/u.s)**2.
-c.k_B*Tkin*u.K/mass['NH3']
+c.k_B*Tkin*u.K/(2.33*u.u)).to(u.km/u.s).value
# SigmaTot using Tkin and SigmaNH3 from BM and Ladd+94, assuming 2.37 a.m.u.
SigmaTot2 = np.sqrt((SigmaNH3*u.km/u.s)**2.
-c.k_B*Tkin*u.K/mass['NH3']
+c.k_B*Tkin*u.K/mass['average']).to(u.km/u.s).value
# Plot and examine.
fig = plt.figure(figsize = (14., 10.))
ax = fig.gca()
if plotNewWeight:
ax.set_ylim(.18, .51)
else:
ax.set_ylim(.18, .47)
ax.plot(SigmaTot0,
linestyle = 'none',
marker = '.',
markersize = 32.,
markeredgecolor = ssk_colors[3],
markerfacecolor = colors.to_rgba(ssk_colors[3], alpha = .2),
lw = 3.,
label = '$\sigma_{tot}$ from Goodman+93')
ax.plot(SigmaTot1,
linestyle = 'none',
marker = '.',
markersize = 32.,
markeredgecolor = ssk_colors[5],
markerfacecolor = colors.to_rgba(ssk_colors[5], alpha = .2),
lw = 3.,
label = 'based on $T_{kin}$ and $\sigma_{{NH}_3}$ from BM and Ladd+94,\nassuming 2.33 a.m.u.')
if plotNewWeight:
ax.plot(SigmaTot2,
linestyle = 'none',
marker = '.',
markersize = 32.,
markeredgecolor = ssk_colors[4],
markerfacecolor = colors.to_rgba(ssk_colors[4], alpha = .2),
lw = 3.,
label = 'based on $T_{kin}$ and $\sigma_{{NH}_3}$ from BM and Ladd+94,\nassuming 2.37 a.m.u.')
ax.vlines(range(len(SigmaTot0)), *ax.get_ylim(),
linestyle = ':')
ax.legend(loc = 'upper left',
fontsize = 22,
frameon = True,
edgecolor = 'none',
facecolor = 'w',
framealpha = .85)
ax.set_xticks(range(len(SigmaTot0)))
ax.set_xticklabels(table1['ID'].values,
size = 14.,
rotation = 90)
ax.set_xlim(.5, len(SigmaTot0)-.5)
ax.set_ylabel('$\sigma_{tot}$ [km s$^{-1}$]')
return fig, ax
# a plotting function
def plotDroplet(reg, core, list_dictionaries, annotate = True):
'''
The function used to plot the droplet for examining the boundary definition.
Input
------
reg: 'L1688' or 'B18'
core: The core number. From 1 to 12 for 'L1688', and from 1 to 6 for 'B18'.
L1688 has an 'extra' core.
list_dictionaries: list of data dictionaries in the order of dict_data,
dict_masks, dict_YSOs, and dict_Vlsr_predicted.
Output
------
    fig: matplotlib figure instance
'''
dict_data, dict_masks, dict_YSOs, dict_Vlsr_predicted = list_dictionaries
mask = dict_masks[reg][core]
header = dict_data[reg]['header_GAS']
wcs_GAS = wcs.WCS(header)
frame = frameMask(mask)
list_images = [dict_data[reg]['colden'],
dict_data[reg]['temp'],
dict_data[reg]['Tpeak'],
dict_data[reg]['Sigma'],
dict_data[reg]['Vlsr'],
dict_Vlsr_predicted[reg][core]]
list_names = [r'$N_{H_2}$',
r'$T_{dust}$',
r'$T_{peak}$',
r'$\sigma_{{NH}_3}$',
r'$V_{LSR}$',
r'$Pred. V_{LSR}$']
list_norms = [colors.LogNorm(np.nanmedian(list_images[0][mask])/5.,
np.nanmedian(list_images[0][mask])*2.),
colors.Normalize(np.nanmedian(list_images[1][mask])-3.,
np.nanmedian(list_images[1][mask])+3.),
colors.LogNorm(np.nanmedian(list_images[2][mask])/5.,
np.nanmedian(list_images[2][mask])*2.),
colors.Normalize(.05, .45),
colors.Normalize(np.nanmedian(list_images[4][mask])-
(np.nanmax(list_images[4][mask])-np.nanmin(list_images[4][mask])),
np.nanmedian(list_images[4][mask])+
(np.nanmax(list_images[4][mask])-np.nanmin(list_images[4][mask]))),
colors.Normalize(np.nanmedian(list_images[4][mask])-
(np.nanmax(list_images[4][mask])-np.nanmin(list_images[4][mask])),
np.nanmedian(list_images[4][mask])+
(np.nanmax(list_images[4][mask])-np.nanmin(list_images[4][mask])))]
list_cmaps = ['Greys',
'YlOrRd_r',
'Greys',
'YlGnBu',
'RdYlBu_r',
'RdYlBu_r']
nrows, ncols = 3, 2
fig = plt.figure(figsize = (20., 20./ncols*nrows/frame[2]*frame[3]))
figLeft, figRight, figBottom, figTop = .085, .985, .07, .98
gapHorizontal, gapVertical = .005, .01
subplotWidth = (figRight-figLeft - gapHorizontal*(ncols-1.))/ncols
subplotHeight = (figTop-figBottom - gapVertical*(nrows-1.))/nrows
scalebar = np.array([.02, .05, .1, .2])
pixscale = (distances[reg]*np.radians(abs(header['CDELT1']))).to(u.pc).value
scalebar_pix = scalebar/pixscale
scalebar = scalebar[np.argmin(abs(scalebar_pix-.25*frame[2]))]
scalebar_pix = scalebar_pix[np.argmin(abs(scalebar_pix-.25*frame[2]))]
# centroid ####
mapTpeak = dict_data[reg]['Tpeak']
meshx, meshy = np.meshgrid(np.arange(mask.shape[1]), np.arange(mask.shape[0]))
#stat = statBasic2D(mask.astype(float)[mask], (meshy[mask], meshx[mask])) ## no weighting
stat = statBasic2D(mapTpeak[mask], (meshy[mask], meshx[mask])) ## weight by Tpeak
stat.calculate()
ceny, cenx = stat.mom1
if annotate:
fig.text(.5, .0015, 'R.A.[J2000]',
color = 'k',
weight = 'black',
verticalalignment = 'bottom',
horizontalalignment = 'center')
fig.text(.005, .5, 'Dec.[J2000]',
rotation = 90.,
color = 'k',
weight = 'black',
verticalalignment = 'center',
horizontalalignment = 'left')
for i in range(len(list_images)):
icol, irow = i%ncols, i//ncols
axis = fig.add_axes([figLeft+icol*(subplotWidth+gapHorizontal),
figBottom+(nrows-irow-1)*(subplotHeight+gapVertical),
subplotWidth, subplotHeight],
projection = wcs_GAS)
image = list_images[i]
axis.imshow(image,
cmap = list_cmaps[i],
norm = list_norms[i])
count_image = np.sum(np.isfinite(image[int(frame[1]):int(frame[1]+frame[3]),
int(frame[0]):int(frame[0]+frame[2])]))
count_mask = np.sum(mask)
if count_image < 3.*count_mask:
axis.contour(mask,
levels = [.5],
colors = 'k',
linewidths = 8.)
else:
axis.contour(mask,
levels = [.5],
colors = 'w',
linewidths = 8.)
axis.plot(dict_YSOs[reg][:, 0], dict_YSOs[reg][:, 1],
color = 'orange',
marker = '*',
markersize = 48.,
markeredgecolor = 'w',
markeredgewidth = 2.,
linestyle = 'none')
'''
# This is for plotting the shadow of the crosshair that shows the cent.
axis.plot(cenx, ceny,
color = 'r',
marker = '+',
markersize = 43.,
markeredgecolor = 'w',
markeredgewidth = 7.,
linestyle = 'none',
zorder = 1000)
'''
# centroid
axis.plot(cenx, ceny,
color = 'r',
marker = '+',
markersize = 42.,
markeredgecolor = 'r',
markeredgewidth = 3.,
linestyle = 'none',
zorder = 1001)
if i == 3:
Tkin_median = np.nanmedian(dict_data[reg]['Tkin'][int(frame[1]):int(frame[1]+frame[3]),
int(frame[0]):int(frame[0]+frame[2])])
NT_sonic = np.sqrt(c.k_B*Tkin_median*u.K/mass['NH3']
+c.k_B*Tkin_median*u.K/mass['average'])
NT_sonic = NT_sonic.to(u.km/u.s).value
axis.contour((list_images[i] < NT_sonic),
levels = [.5],
colors = 'r',
linewidths = 3.)
axis.plot([frame[0]+7./8.*frame[2], frame[0]+7./8.*frame[2]-scalebar_pix],
[frame[1]+6./7.*frame[3], frame[1]+6./7.*frame[3]],
color = 'k',
linewidth = 5.)
axis.text(frame[0]+7./8.*frame[2]-.5*scalebar_pix, frame[1]+4./5.*frame[3], '%.2f pc'%scalebar,
color = 'k',
weight = 'bold',
verticalalignment = 'center',
horizontalalignment = 'center')
axis.fill_between([frame[0]+7./8.*frame[2]+.1*scalebar_pix, frame[0]+7./8.*frame[2]-1.1*scalebar_pix],
frame[1]+(6./7.+4./5.)/2.*frame[3] - 1./14.*frame[3],
frame[1]+(6./7.+4./5.)/2.*frame[3] + 1./14.*frame[3],
color = 'w',
linewidth = 0.,
alpha = .4)
'''
axis.fill_between([frame[0]+7./8.*frame[2]+.1*scalebar_pix, frame[0]+7./8.*frame[2]-1.1*scalebar_pix],
frame[1]+(6./7.+4./5.)/2.*frame[3] - 1./14.*frame[3],
frame[1]+(6./7.+4./5.)/2.*frame[3] + 1./14.*frame[3],
color = 'none',
edgecolor = 'k',
linewidth = 1.)
'''
if annotate:
corner_max = np.max(image[int(frame[1]):int(frame[1]+.9*frame[3]),
int(frame[0]):int(frame[0]+.1*frame[2])])
corner_min = np.min(image[int(frame[1]):int(frame[1]+.9*frame[3]),
int(frame[0]):int(frame[0]+.1*frame[2])])
if (corner_max >= list_norms[i].vmax):
axis.text(frame[0]+.1*frame[2], frame[1]+.9*frame[3], list_names[i],
color = 'w',
weight = 'bold',
horizontalalignment = 'center',
verticalalignment = 'center')
elif (i in [4, 5]) and (corner_min < list_norms[i].vmin):
axis.text(frame[0]+.1*frame[2], frame[1]+.9*frame[3], list_names[i],
color = 'w',
weight = 'bold',
horizontalalignment = 'center',
verticalalignment = 'center')
else:
axis.text(frame[0]+.1*frame[2], frame[1]+.9*frame[3], list_names[i],
color = 'k',
weight = 'bold',
horizontalalignment = 'center',
verticalalignment = 'center')
beam_center = tuple(wcs_GAS.wcs_pix2world([[frame[0]+1./6.*frame[2], frame[1]+1./6.*frame[3]]], 0)[0]*u.deg)
beam_size = header['BMAJ']/2. * u.degree
beam = wcsaxes.SphericalCircle(beam_center,
beam_size,
edgecolor = 'w',
facecolor = 'k',
linewidth = 2.,
zorder = 999,
transform = axis.get_transform('fk5'))
axis.add_patch(beam)
axis.set_xlim(frame[0], frame[0]+frame[2])
axis.set_ylim(frame[1], frame[1]+frame[3])
axis.coords[0].set_major_formatter('hh:mm:ss')
axis.coords[0].set_ticks(size = 8.)
axis.coords[1].set_ticks(size = 8.)
if irow != (nrows-1):
axis.coords[0].set_ticklabel_visible(False)
if icol != 0:
axis.coords[1].set_ticklabel_visible(False)
return fig
# a plotting function
def plotRegion(reg, list_dictionaries, chooseStructure = None, annotate = True):
'''
    The function used to plot a whole region, optionally framing one droplet for
    examining the boundary definition.
    Input
    ------
    reg: 'L1688' or 'B18'
    list_dictionaries: list of data dictionaries in the order of dict_data,
                       dict_masks, dict_YSOs, and dict_Vlsr_predicted.
    chooseStructure: the structure to highlight. 1-12 or 'extra' for 'L1688';
                     1-6 for 'B18'; None plots the region without a highlight.
    annotate: whether to add axis labels and panel annotations.
    Output
    ------
    fig: matplotlib figure instance
'''
if reg == 'L1688' and chooseStructure in list(range(1, 13))+['extra']:
core = chooseStructure
plotStructure = True
elif reg == 'B18' and chooseStructure in range(1, 7):
core = chooseStructure
plotStructure = True
elif chooseStructure is None:
core = -1
plotStructure = False
else:
raise ValueError('"chooseStructure" is an integer. 1-12 for L1688; 1-6 for B18.')
####
dict_data, dict_masks, dict_YSOs, dict_Vlsr_predicted = list_dictionaries
header = dict_data[reg]['header_GAS']
wcs_GAS = wcs.WCS(header)
if reg == 'L1688':
frame = (7., 0., 261., 196.2)
elif reg == 'B18':
frame = (26., -10., 725., 290.)
if plotStructure:
mask = dict_masks[reg][core]
frameCore = frameMask(mask)
list_images = [dict_data[reg]['colden'],
dict_data[reg]['temp'],
dict_data[reg]['Tpeak'],
dict_data[reg]['Tkin'],
dict_data[reg]['Vlsr'],
dict_data[reg]['Sigma']]
list_names = [r'$N_{H_2}$',
r'$T_{dust}$',
r'$T_{peak}$',
r'$T_{kin}$',
r'$V_{LSR}$',
r'$\sigma_{{NH}_3}$']
norm_Vlsr = colors.Normalize(2.5, 4.5) if reg == 'L1688'\
else colors.Normalize(5.5, 7.)
list_norms = [colors.LogNorm(1e21, 1e23),
colors.Normalize(0., 30.),
colors.LogNorm(.5, 30.),
colors.Normalize(0., 30.),
norm_Vlsr,
colors.Normalize(.05, .45)]
list_cmaps = ['Greys',
'YlOrRd_r',
'Greys',
'YlOrRd_r',
'RdYlBu_r',
'YlGnBu']
nrows, ncols = 3, 2
if reg == 'L1688':
fig = plt.figure(figsize = (16., 18.))
figLeft, figRight, figBottom, figTop = .085, .985, .07, .98
listStructures = list(range(1, 13))+['extra']
markersizeYSOs = 8.
elif reg == 'B18':
fig = plt.figure(figsize = (20., 12.))
figLeft, figRight, figBottom, figTop = .085, .985, .09, .99
listStructures = range(1, 7)
markersizeYSOs = 11.
gapHorizontal, gapVertical = .005, .01
subplotWidth = (figRight-figLeft - gapHorizontal*(ncols-1.))/ncols
subplotHeight = (figTop-figBottom - gapVertical*(nrows-1.))/nrows
scalebar = .5 if reg == 'L1688' else 1. ## pc
pixscale = (distances[reg]*np.radians(abs(header['CDELT1']))).to(u.pc).value
scalebar_pix = scalebar/pixscale
if annotate:
fig.text(.5, .0015, 'R.A.[J2000]',
color = 'k',
weight = 'black',
verticalalignment = 'bottom',
horizontalalignment = 'center')
fig.text(.005, .5, 'Dec.[J2000]',
rotation = 90.,
color = 'k',
weight = 'black',
verticalalignment = 'center',
horizontalalignment = 'left')
for i in range(len(list_images)):
icol, irow = i%ncols, i//ncols
axis = fig.add_axes([figLeft+icol*(subplotWidth+gapHorizontal),
figBottom+(nrows-irow-1)*(subplotHeight+gapVertical),
subplotWidth, subplotHeight],
projection = wcs_GAS)
image = list_images[i]
axis.imshow(image,
cmap = list_cmaps[i],
norm = list_norms[i])
for j, structure in enumerate(listStructures):
axis.contour(dict_masks[reg][structure],
levels = [.5],
colors = 'w',
linewidths = 3.)
axis.contour(dict_masks[reg][structure],
levels = [.5],
colors = ssk_colors[j],
linewidths = 2.)
'''
count_image = np.sum(np.isfinite(image[int(frame[1]):int(frame[1]+frame[3]),
int(frame[0]):int(frame[0]+frame[2])]))
count_mask = np.sum(mask)
if count_image < 3.*count_mask:
axis.contour(mask,
levels = [.5],
colors = 'k',
linewidths = 8.)
else:
axis.contour(mask,
levels = [.5],
colors = 'w',
linewidths = 8.)
'''
if i in [1, 3]:
axis.plot(dict_YSOs[reg][:, 0], dict_YSOs[reg][:, 1],
color = 'orange',
marker = '*',
markersize = markersizeYSOs,
markeredgecolor = 'k',
linestyle = 'none')
else:
axis.plot(dict_YSOs[reg][:, 0], dict_YSOs[reg][:, 1],
color = 'orange',
marker = '*',
markersize = markersizeYSOs,
markeredgecolor = 'w',
linestyle = 'none')
if plotStructure:
if i in [1, 3, 4]:
axis.fill_between([frameCore[0], frameCore[0]+frameCore[2]],
frameCore[1], frameCore[1]+frameCore[3],
edgecolor = 'w',
color = 'none',
linewidth = 3.)
axis.fill_between([frameCore[0], frameCore[0]+frameCore[2]],
frameCore[1], frameCore[1]+frameCore[3],
edgecolor = 'k',
color = 'none',
linewidth = 2.)
else:
axis.fill_between([frameCore[0], frameCore[0]+frameCore[2]],
frameCore[1], frameCore[1]+frameCore[3],
edgecolor = 'w',
color = 'none',
linewidth = 3.)
axis.fill_between([frameCore[0], frameCore[0]+frameCore[2]],
frameCore[1], frameCore[1]+frameCore[3],
edgecolor = 'k',
color = 'none',
linewidth = 2.)
'''
if i == 5:
Tkin_median = np.nanmedian(dict_data[reg]['Tkin'][mask])
NT_sonic = np.sqrt(c.k_B*Tkin_median*u.K/mass['NH3']
+c.k_B*Tkin_median*u.K/mass['average'])
NT_sonic = NT_sonic.to(u.km/u.s).value
axis.contour((list_images[i] < NT_sonic),
levels = [.5],
colors = 'r',
linewidths = 2.)
'''
axis.plot([frame[0]+7./8.*frame[2], frame[0]+7./8.*frame[2]-scalebar_pix],
[frame[1]+1./7.*frame[3], frame[1]+1./7.*frame[3]],
color = 'k',
linewidth = 5.)
axis.text(frame[0]+7./8.*frame[2]-.5*scalebar_pix, frame[1]+1./5.*frame[3], '%.1f pc'%scalebar,
color = 'k',
weight = 'bold',
verticalalignment = 'center',
horizontalalignment = 'center')
axis.fill_between([frame[0]+7./8.*frame[2]+.1*scalebar_pix, frame[0]+7./8.*frame[2]-1.1*scalebar_pix],
frame[1]+(1./7.+1./5.)/2.*frame[3] - 1./14.*frame[3],
frame[1]+(1./7.+1./5.)/2.*frame[3] + 1./12.*frame[3],
color = 'w',
linewidth = 0.,
alpha = .4)
'''
axis.fill_between([frame[0]+7./8.*frame[2]+.1*scalebar_pix, frame[0]+7./8.*frame[2]-1.1*scalebar_pix],
frame[1]+(6./7.+4./5.)/2.*frame[3] - 1./14.*frame[3],
frame[1]+(6./7.+4./5.)/2.*frame[3] + 1./14.*frame[3],
color = 'none',
edgecolor = 'k',
linewidth = 1.)
'''
if annotate:
axis.text(frame[0]+.1*frame[2], frame[1]+.9*frame[3], list_names[i],
color = 'k',
weight = 'bold',
horizontalalignment = 'center',
verticalalignment = 'center')
beam_center = tuple(wcs_GAS.wcs_pix2world([[frame[0]+1./6.*frame[2], frame[1]+1./6.*frame[3]]], 0)[0]*u.deg)
beam_size = header['BMAJ']/2. * u.degree
beam = wcsaxes.SphericalCircle(beam_center,
beam_size,
edgecolor = 'w',
facecolor = 'k',
zorder = 999,
transform = axis.get_transform('fk5'))
axis.add_patch(beam)
if annotate:
axis.text(frame[0]+1./6.*frame[2], frame[1]+1./6.*frame[3]+20., 'beam',
style = 'italic',
weight = 'bold',
horizontalalignment = 'center',
verticalalignment = 'center')
axis.set_xlim(frame[0], frame[0]+frame[2])
axis.set_ylim(frame[1], frame[1]+frame[3])
axis.coords[0].set_major_formatter('hh:mm')
axis.coords[1].set_major_formatter('dd:mm')
axis.coords[0].set_ticks(size = 8.)
axis.coords[1].set_ticks(size = 8.)
if irow != (nrows-1):
axis.coords[0].set_ticklabel_visible(False)
if icol != 0:
axis.coords[1].set_ticklabel_visible(False)
return fig
# a plotting function
def plotTpeakSigma(list_dictionaries, xscale = 'log'):
###
rcParams['figure.subplot.left'] = .09
rcParams['figure.subplot.right'] = .97
rcParams['figure.subplot.bottom'] = .12
rcParams['figure.subplot.top'] = .96
rcParams['font.size'] = 30
if xscale == 'log':
rcParams['xtick.major.pad'] = 11
elif xscale == 'linear':
rcParams['xtick.major.pad'] = 5
###
dict_data, dict_masks, dict_YSOs, dict_Vlsr_predicted = list_dictionaries
fig, ax = plt.subplots(figsize = (14., 7.),
ncols = 2)
if xscale == 'log':
xmin, xmax = .2, 8.5
elif xscale == 'linear':
xmin, xmax = .03, 5.
ymin, ymax = .02, .92
#
Sigma_SonicNT = (np.sqrt(c.k_B*10.*u.K/mass['NH3']+c.k_B*10.*u.K/mass['average'])).to(u.km/u.s).value
Sigma_halfSonicNT = (np.sqrt((c.k_B*10.*u.K/mass['NH3']+.5**2.*c.k_B*10.*u.K/mass['average']))).to(u.km/u.s).value
for i, reg in enumerate(['L1688', 'B18']):
# list of structures
if reg == 'L1688':
listStructures = list(range(1, 13))+['extra']
elif reg == 'B18':
listStructures = range(1, 7)
## load the data
mapX = dict_data[reg]['Tpeak']
mapY = dict_data[reg]['Sigma']
### masking
maskData = np.isfinite(mapX) & np.isfinite(mapY)
### all points and contours of their distribution
if xscale == 'log':
hist2D = np.histogram2d(mapX[maskData], mapY[maskData],
[np.logspace(np.log10(xmin), np.log10(xmax), 20),
np.linspace(ymin, ymax, 20)])
xBinCent = 10.**(np.log10(hist2D[1])[:-1]+.5*np.diff(np.log10(hist2D[1])))
yBinCent = hist2D[2][:-1]+.5*np.diff(hist2D[2])
elif xscale == 'linear':
hist2D = np.histogram2d(mapX[maskData], mapY[maskData],
[np.linspace(xmin, xmax, 20),
np.linspace(ymin, ymax, 20)])
xBinCent = hist2D[1][:-1]+.5*np.diff(hist2D[1])
yBinCent = hist2D[2][:-1]+.5*np.diff(hist2D[2])
height2D = hist2D[0]/np.sum(hist2D[0])
n = 5000
## accumulative contours
t = np.linspace(0., height2D.max(), n)
integral = ((height2D >= t[:, None, None]) * height2D).sum(axis=(1,2))
f = scipy.interpolate.interp1d(integral, t)
contourLevels = f(np.array([.95, .75, .5, .25]))
## plot points within the cores
axis = ax[i]
mask_all = np.zeros(maskData.shape, dtype = bool)
for j, structure in enumerate(listStructures[::-1]):
### mask of the core
mask_core = maskData & dict_masks[reg][structure]
### plotting
alphaStructures = .5
axis.plot(mapX[mask_core], mapY[mask_core],
linestyle = 'none',
marker = 'o',
color = ssk_colors[j],
markeredgecolor = 'none',
markersize = 5.,
alpha = alphaStructures)
### record the masks
mask_all = mask_all | mask_core
### plotting the distribution of all points in contours
CS = axis.contour(xBinCent, yBinCent, height2D.T,
levels = contourLevels,
colors = 'k',
linewidths = [2., .5, .5, .5],
zorder = 1)
#### labeling inline
fmt = {}
strs = ['95%', '75%', '50%', '25%']
#strs = ['95', '75', '50', '25']
for l, s in zip(CS.levels, strs):
fmt[l] = s
axis.clabel(CS, CS.levels,
inline = True,
inline_spacing = 1.5,
fmt = fmt,
fontsize = 12,
use_clabeltext = True)
### plotting the rest of the points
axis.plot(mapX[~mask_all], mapY[~mask_all],
linestyle = 'none',
marker = 'o',
color = 'none',
markeredgecolor = 'k',
markersize = 3.,
alpha = .1,
zorder = 0)
### Plot the expected line widths
axis.hlines([Sigma_SonicNT, Sigma_halfSonicNT], xmin, xmax,
linestyles = ['--', ':'],
colors = 'k')
### adjust the plot
#### limits
axis.set_xlim(xmin, xmax)
axis.set_xscale(xscale)
axis.set_ylim(ymin, ymax)
#### ticks
#axis.set_xticks([0., 5., 10., 15., 20.])
axis.set_yticks([.2, .4, .6, .8])
if i != 0:
axis.set_yticklabels([])
if xscale == 'log':
axis.text(7., .85, reg,
weight = 'black',
horizontalalignment = 'right',
verticalalignment = 'top')
axis.text(8., Sigma_SonicNT+.01, '$\sigma_{NT}=c_{s, ave}$',
size = 14.,
horizontalalignment = 'right',
verticalalignment = 'bottom')
axis.text(8., Sigma_halfSonicNT+.01, '$\sigma_{NT}=0.5c_{s, ave}$',
size = 14.,
horizontalalignment = 'right',
verticalalignment = 'bottom')
elif xscale == 'linear':
axis.text(4.7, .85, reg,
weight = 'black',
horizontalalignment = 'right',
verticalalignment = 'top')
axis.text(4.95, Sigma_SonicNT+.01, '$\sigma_{NT}=c_{s, ave}$',
size = 14.,
horizontalalignment = 'right',
verticalalignment = 'bottom')
axis.text(4.95, Sigma_halfSonicNT+.01, '$\sigma_{NT}=0.5c_{s, ave}$',
size = 14.,
horizontalalignment = 'right',
verticalalignment = 'bottom')
axis.yaxis.set_minor_locator(ticker.AutoMinorLocator(n = 4))
axis.xaxis.set_minor_formatter(FuncFormatter2(ticks_format, interval = 2))
axis.tick_params(axis='x', which='minor', labelsize=11)
#### axis labels
#axis.set_xlabel(r'Peak T$_{A^*}$ [K]',
# labelpad = -5.)
#if i == 0:
# axis.set_ylabel('Line Width [km s$^{-1}$]')
fig.text(.5, .035, '$T_{peak}$ [main-beam; K]',
weight = 'bold',
family = 'StixGeneral',
horizontalalignment = 'center',
verticalalignment = 'center')
fig.text(.03, .5, '$\sigma_{{NH}_3}$ [km s$^{-1}$]',
rotation = 90,
weight = 'bold',
horizontalalignment = 'center',
verticalalignment = 'center')
import styles
return fig
def plotSigmas(list_dictionaries, plotSigma = 'sigma', plotRfromA = False):
###
rcParams['figure.subplot.left'] = .09
rcParams['figure.subplot.right'] = .97
rcParams['figure.subplot.bottom'] = .12
rcParams['figure.subplot.top'] = .96
rcParams['font.size'] = 30
###
dict_data, dict_masks, dict_YSOs, dict_Vlsr_predicted = list_dictionaries
#ncols, nrows = 5, 4
fig = plt.figure(figsize = (18., 18.))
#Dmax = .145
#Dmax = 1.3*0.10796408847 ####
Dmax = .13
rmin, rmax = 0., Dmax
rbins = np.linspace(rmin, rmax, 12) ####
ymin, ymax = .02, .65
#
SigmaNT_Sonic = (np.sqrt(c.k_B*10.*u.K/mass['average'])).to(u.km/u.s).value
SigmaNT_halfSonic = (np.sqrt(.5**2.*c.k_B*10.*u.K/mass['average'])).to(u.km/u.s).value
#
Sigma_SonicNT = (np.sqrt(c.k_B*10.*u.K/mass['NH3']+c.k_B*10.*u.K/mass['average'])).to(u.km/u.s).value
Sigma_halfSonicNT = (np.sqrt((c.k_B*10.*u.K/mass['NH3']+.5**2.*c.k_B*10.*u.K/mass['average']))).to(u.km/u.s).value
#
figLeft, figRight, figBottom, figTop = .06, .99, .055, .9
gapHorizontal, gapVertical = .005, .005
gapReg = .055 ## horizontal
gapExtra = .055 ## Vertical
frameWidth = (figRight-figLeft-gapReg-3.*gapHorizontal)/5.
frameHeight = (figTop-figBottom-4.*gapVertical)/5.
dict_frames = {'L1688': [], 'B18': []}
## L1688
for i in range(12):
irow, icol = i//3, i%3
frame = fig.add_axes([figLeft+gapReg+gapHorizontal+2.*frameWidth+icol*(frameWidth+gapHorizontal),
figBottom+gapExtra+frameHeight+(4-irow-1)*(frameHeight+gapVertical),
frameWidth, frameHeight])
dict_frames['L1688'].append(frame)
frame = fig.add_axes([figLeft+gapReg+gapHorizontal+2.*frameWidth,
figBottom,
frameWidth, frameHeight])
dict_frames['L1688'].append(frame)
## B18
for i in range(6):
irow, icol = i//2, i%2
frame = fig.add_axes([figLeft+icol*(frameWidth+gapHorizontal),
figBottom+gapExtra+frameHeight+(4-irow-1)*(frameHeight+gapVertical),
frameWidth, frameHeight])
dict_frames['B18'].append(frame)
#
lineSpacing = .026
for i in range(19):
# list of structures
if i < 13:
reg = 'L1688'
listStructures = list(range(1, 13))+['extra']
structure = listStructures[i]
axis = dict_frames[reg][i]
#j = i
else:
reg = 'B18'
listStructures = range(1, 7)
structure = listStructures[i-13]
axis = dict_frames[reg][i-13]
#j = i-13
hdr = dict_data[reg]['header_GAS']
mapTpeak = dict_data[reg]['Tpeak']
mapNT = dict_data[reg]['SigmaNT']
mapT = dict_data[reg]['SigmaT']
mapSigma = dict_data[reg]['Sigma']
mask = dict_masks[reg][structure]
#maskFinite = np.isfinite(mapNT)&np.isfinite(mapT)
distance = distances[reg]
# deriving the profile for pixels within Dmax
## statBasic2D (modified based on astrodendro)
meshx, meshy = np.meshgrid(np.arange(mask.shape[1]), np.arange(mask.shape[0]))
#stat = statBasic2D(mask.astype(float)[mask], (meshy[mask], meshx[mask])) ## no weighting
stat = statBasic2D(mapTpeak[mask], (meshy[mask], meshx[mask])) ## weight by Tpeak
stat.calculate()
## centroid
ceny, cenx = stat.mom1
## distance
meshrPix = np.hypot(meshx-cenx, meshy - ceny)
meshr = (meshrPix*np.radians(abs(hdr['CDELT1']))*distance).to(u.pc).value
## effective radius
Reff = (stat.radius.value*np.radians(abs(hdr['CDELT1']))*distance).to(u.pc).value
Reff *= (2.*np.sqrt(2.*np.log(2.))) #FWHM as in Goodman+ 93
Reff2 = (np.sqrt(stat.area_exact.value/np.pi)*np.radians(abs(hdr['CDELT1']))*distance).to(u.pc).value
Reff_low = np.arange(1., 30.)[np.array([np.sum(meshrPix<r)!=np.sum(mask&(meshrPix<r))
for r in np.arange(1., 30.)])][0]
Reff_low = (Reff_low*np.radians(abs(hdr['CDELT1']))*distance).to(u.pc).value
Reff_low = min(Reff_low, Reff-(1.*np.radians(abs(hdr['CDELT1']))*distance).to(u.pc).value)
Reff_high = np.arange(1., 30.)[np.array([np.sum(mask&(meshrPix<r))==np.sum(mask)
for r in np.arange(1., 30.)])][0]
Reff_high = (Reff_high*np.radians(abs(hdr['CDELT1']))*distance).to(u.pc).value
Reff_high = max(Reff_low, Reff+(1.*np.radians(abs(hdr['CDELT1']))*distance).to(u.pc).value)
if plotSigma == 'components':
## points within the mask
mask_in = mask
ptr_in, ptNT_in, ptT_in = meshr[mask_in], mapNT[mask_in], mapT[mask_in]
## all points within Dmax
mask_all = (meshr < Dmax)
ptr_all, ptNT_all, ptT_all = meshr[mask_all], mapNT[mask_all], mapT[mask_all]
### bin the points
binCent_all = rbins[:-1] + .5*np.diff(rbins)
binNT_all = np.array([np.nanmedian(ptNT_all[(ptr_all >= rbins[k])&(ptr_all < rbins[k+1])])\
for k in range(len(rbins)-1)])
binNTstd_all = np.array([np.nanstd(ptNT_all[(ptr_all >= rbins[k])&(ptr_all < rbins[k+1])])\
for k in range(len(rbins)-1)])
binT_all = np.array([np.nanmedian(ptT_all[(ptr_all >= rbins[k])&(ptr_all < rbins[k+1])])\
for k in range(len(rbins)-1)])
binTstd_all = np.array([np.nanstd(ptT_all[(ptr_all >= rbins[k])&(ptr_all < rbins[k+1])])\
for k in range(len(rbins)-1)])
# plotting
# plot
#axis = ax[i//ncols, i%ncols]
## points within the mask
### property A
axis.plot(ptr_in, ptNT_in,
linestyle = 'none',
linewidth = 0.,
marker = 'o',
color = ssk_colors[2],
markeredgecolor = 'none',
markersize = 4.)
### property B
axis.plot(ptr_in, ptT_in,
linestyle = 'none',
linewidth = 0.,
marker = 'o',
color = ssk_colors[0],
markeredgecolor = 'none',
markersize = 4.)
### bins of all
axis.fill_between(binCent_all, binNT_all-.5*binNTstd_all, binNT_all+.5*binNTstd_all,
color = ssk_colors[2],
linewidth = 0.,
edgecolor = 'none',
alpha = .15)
axis.fill_between(binCent_all, binT_all-.5*binTstd_all, binT_all+.5*binTstd_all,
color = ssk_colors[0],
linewidth = 0.,
edgecolor = 'none',
alpha = .15)
elif plotSigma == 'sigma':
## points within the mask
mask_in = mask
ptr_in, ptSigma_in = meshr[mask_in], mapSigma[mask_in]
## all points within Dmax
mask_all = (meshr < Dmax)
ptr_all, ptSigma_all = meshr[mask_all], mapSigma[mask_all]
### bin the points
binCent_all = rbins[:-1] + .5*np.diff(rbins)
binSigma_all = np.array([np.nanmedian(ptSigma_all[(ptr_all >= rbins[k])&(ptr_all < rbins[k+1])])\
for k in range(len(rbins)-1)])
binSigmastd_all = np.array([np.nanstd(ptSigma_all[(ptr_all >= rbins[k])&(ptr_all < rbins[k+1])])\
for k in range(len(rbins)-1)])
# plotting
# plot
#axis = ax[i//ncols, i%ncols]
## points within the mask
### property A
axis.plot(ptr_in, ptSigma_in,
linestyle = 'none',
linewidth = 0.,
marker = 'o',
color = ssk_colors[1],
markeredgecolor = 'none',
markersize = 4.)
### bins of all
axis.fill_between(binCent_all, binSigma_all-.5*binSigmastd_all, binSigma_all+.5*binSigmastd_all,
color = ssk_colors[1],
linewidth = 0.,
edgecolor = 'none',
alpha = .15)
## effective radius used to calculate the physical properties
'''
axis.fill_between([.045, .055], ymin, ymax,
edgecolor = 'k',
facecolor = 'none',
alpha = .55,
hatch = '//////',
linewidth = .01)
'''
axis.fill_between([Reff_low, Reff_high], ymin, ymax,
facecolor = 'k',
edgecolor = 'none',
alpha = .1)
axis.vlines(Reff, ymin, ymax,
linestyle = '-',
color = 'k')
if plotRfromA:
axis.vlines(Reff2, ymin, ymax,
linestyle = '--',
color = 'k')
### Plot the expected line widths
if plotSigma == 'components':
axis.hlines([SigmaNT_Sonic, SigmaNT_halfSonic], rmin, rmax,
linestyles = ['--', ':'],
colors = 'k')
axis.text(.125, SigmaNT_Sonic+.02, '$c_{s, ave}$',
size = 20,
horizontalalignment = 'right')
axis.text(.125, SigmaNT_halfSonic+.02, '$0.5c_{s, ave}$',
size = 20,
horizontalalignment = 'right')
elif plotSigma == 'sigma':
axis.hlines([Sigma_SonicNT, Sigma_halfSonicNT], rmin, rmax,
linestyles = ['--', ':'],
colors = 'k')
axis.text(.125, Sigma_SonicNT+.02, '$\sigma_{NT}=c_{s, ave}$',
size = 20,
horizontalalignment = 'right')
axis.text(.125, Sigma_halfSonicNT+.02, '$\sigma_{NT}=0.5c_{s, ave}$',
size = 20,
horizontalalignment = 'right')
# annotation
axis.text(.12, .6, str(structure).capitalize(),
size = 26,
horizontalalignment = 'right',
verticalalignment = 'top')
## adjust the subplot
### tick labels
if i not in [0, 3, 6, 9, 12, 13, 15, 17]:
axis.set_yticks([.3, .6])
axis.set_yticklabels([])
else:
axis.set_yticks([.3, .6])
axis.tick_params(axis='y', which='major', labelsize=24)
axis.yaxis.labelpad = -1.5
if i not in [9, 10, 11, 12, 17, 18]:
axis.set_xticks([.05, .1])
axis.set_xticklabels([])
else:
axis.set_xticks([.05, .1])
labels = axis.get_xticklabels()
axis.tick_params(axis='x', which='major', labelsize=24)
#axis.xaxis.labelpad = -5.
#plt.setp(labels, rotation=330)
if i in [9, 12, 17]:
axis.set_xlabel('$R_{eff}$ [pc]')
if plotSigma == 'sigma':
axis.set_ylabel('$\sigma_{{NH}_3}$ [km s$^{-1}$]')
elif plotSigma == 'components':
axis.set_ylabel('$\sigma_{NT}$ or $\sigma_{T}$ [km s$^{-1}$]')
### axis labels
#if (i//ncols == (nrows-1)) and (i%ncols == 0):
# axis.set_xlabel('Distance [pc]')
# axis.set_ylabel(r'$\sigma$ [km s$^{-1}$]')
### limits
axis.set_xlim(rmin, rmax)
axis.set_ylim(ymin, ymax)
fig.text(.075, .97, 'B18',
size = 34,
weight = 'black',
horizontalalignment = 'left',
verticalalignment = 'center')
fig.text(figLeft+gapHorizontal+gapReg+2.*frameWidth+.015, .97, 'L1688',
size = 34,
weight = 'black',
horizontalalignment = 'left',
verticalalignment = 'center')
# legend
if plotSigma == 'components':
#
fig.text(figLeft+gapHorizontal, figBottom+4.*lineSpacing,
'$\sigma_{T}$ of Pixels Inside',
size = 30,
color = ssk_colors[0],
horizontalalignment = 'left',
verticalalignment = 'bottom')
fig.text(figLeft+gapHorizontal, figBottom+3.*lineSpacing,
'$\sigma_{T}$ of All Pixels (binned; 1-$\sigma$)',
size = 30,
color = ssk_colors[0],
alpha = .45,
horizontalalignment = 'left',
verticalalignment = 'bottom')
fig.text(figLeft+gapHorizontal, figBottom+2.*lineSpacing,
'$\sigma_{NT}$ of Pixels Inside',
size = 30,
color = ssk_colors[2],
horizontalalignment = 'left',
verticalalignment = 'bottom')
fig.text(figLeft+gapHorizontal, figBottom+1.*lineSpacing,
'$\sigma_{NT}$ of All Pixels (binned; 1-$\sigma$)',
size = 30,
color = ssk_colors[2],
alpha = .45,
horizontalalignment = 'left',
verticalalignment = 'bottom')
fig.text(figLeft+gapHorizontal, figBottom,
'$R_{eff}$',
size = 30,
color = 'k',
horizontalalignment = 'left',
verticalalignment = 'bottom')
axfig = fig.add_axes([0., 0., 1., 1.])
axfig.plot([figLeft+gapHorizontal+2.*frameWidth, figRight],
[figBottom+frameHeight+1.5*gapVertical, figBottom+frameHeight+1.5*gapVertical],
'k-')
axfig.fill_between([figLeft, figLeft+2.*frameWidth],
figBottom-.2*lineSpacing,
figBottom+5.2*lineSpacing,
color = 'gray',
linewidth = 0.,
alpha = .15,
zorder = 999)
elif plotSigma == 'sigma':
#
fig.text(figLeft+gapHorizontal, figBottom+2.*lineSpacing,
'$\sigma_{{NH}_3}$ of Pixels Inside',
size = 30,
color = ssk_colors[1],
horizontalalignment = 'left',
verticalalignment = 'bottom')
fig.text(figLeft+gapHorizontal, figBottom+1.*lineSpacing,
'$\sigma_{{NH}_3}$ of All Pixels (binned; 1-$\sigma$)',
size = 30,
color = ssk_colors[1],
alpha = .45,
horizontalalignment = 'left',
verticalalignment = 'bottom')
fig.text(figLeft+gapHorizontal, figBottom,
'$R_{eff}$',
size = 30,
color = 'k',
horizontalalignment = 'left',
verticalalignment = 'bottom')
axfig = fig.add_axes([0., 0., 1., 1.])
axfig.plot([figLeft+gapHorizontal+2.*frameWidth, figRight],
[figBottom+frameHeight+1.5*gapVertical, figBottom+frameHeight+1.5*gapVertical],
'k-')
axfig.fill_between([figLeft, figLeft+2.*frameWidth],
figBottom-.2*lineSpacing,
figBottom+3.2*lineSpacing,
color = 'gray',
linewidth = 0.,
alpha = .15,
zorder = 999)
axfig.set_xlim(0., 1.)
axfig.set_ylim(0., 1.)
axfig.set_xticks([])
axfig.set_yticks([])
for sp in ['left', 'right', 'bottom', 'top']:
axfig.spines[sp].set_visible(False)
import styles
return fig
|
mit
|
rkmaddox/expyfun
|
expyfun/_externals/_h5io.py
|
12
|
16532
|
# -*- coding: utf-8 -*-
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import sys
import tempfile
from shutil import rmtree
from os import path as op
import numpy as np
try:
from scipy import sparse
except ImportError:
sparse = None
# Adapted from six
PY3 = sys.version_info[0] == 3
text_type = str if PY3 else unicode # noqa
string_types = str if PY3 else basestring # noqa
special_chars = {'{FWDSLASH}': '/'}
##############################################################################
# WRITING
def _check_h5py():
"""Helper to check if h5py is installed"""
try:
import h5py
except ImportError:
raise ImportError('the h5py module is required to use HDF5 I/O')
return h5py
def _create_titled_group(root, key, title):
"""Helper to create a titled group in h5py"""
out = root.create_group(key)
out.attrs['TITLE'] = title
return out
def _create_titled_dataset(root, key, title, data, comp_kw=None):
"""Helper to create a titled dataset in h5py"""
comp_kw = {} if comp_kw is None else comp_kw
out = root.create_dataset(key, data=data, **comp_kw)
out.attrs['TITLE'] = title
return out
def _create_pandas_dataset(fname, root, key, title, data):
h5py = _check_h5py()
rootpath = '/'.join([root, key])
data.to_hdf(fname, rootpath)
with h5py.File(fname, mode='a') as fid:
fid[rootpath].attrs['TITLE'] = 'pd_dataframe'
def write_hdf5(fname, data, overwrite=False, compression=4,
title='h5io', slash='error'):
"""Write python object to HDF5 format using h5py
Parameters
----------
fname : str
Filename to use.
data : object
Object to write. Can be of any of these types:
{ndarray, dict, list, tuple, int, float, str}
Note that dict objects must only have ``str`` keys. It is recommended
to use ndarrays where possible, as it is handled most efficiently.
overwrite : True | False | 'update'
If True, overwrite file (if it exists). If 'update', appends the title
to the file (or replace value if title exists).
compression : int
Compression level to use (0-9) to compress data using gzip.
title : str
The top-level directory name to use. Typically it is useful to make
this your package name, e.g. ``'mnepython'``.
slash : 'error' | 'replace'
Whether to replace forward-slashes ('/') in any key found nested within
keys in data. This does not apply to the top level name (title).
If 'error', '/' is not allowed in any lower-level keys.
"""
h5py = _check_h5py()
mode = 'w'
if op.isfile(fname):
if isinstance(overwrite, string_types):
if overwrite != 'update':
raise ValueError('overwrite must be "update" or a bool')
mode = 'a'
elif not overwrite:
raise IOError('file "%s" exists, use overwrite=True to overwrite'
% fname)
if not isinstance(title, string_types):
raise ValueError('title must be a string')
comp_kw = dict()
if compression > 0:
comp_kw = dict(compression='gzip', compression_opts=compression)
with h5py.File(fname, mode=mode) as fid:
if title in fid:
del fid[title]
cleanup_data = []
_triage_write(title, data, fid, comp_kw, str(type(data)),
cleanup_data=cleanup_data, slash=slash, title=title)
    # Will not be empty if there is any extra data to be written
for data in cleanup_data:
# In case different extra I/O needs different inputs
title = list(data.keys())[0]
if title in ['pd_dataframe', 'pd_series']:
rootname, key, value = data[title]
_create_pandas_dataset(fname, rootname, key, title, value)
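# Illustrative usage sketch (not part of the original module): a hypothetical helper
# showing how write_hdf5 is typically called with a nested structure. The helper name
# and file name are made up for documentation; the function is never called at import
# time.
def _demo_write_hdf5(fname='h5io_demo.hdf5'):
    """Write a small nested object to an HDF5 file using write_hdf5."""
    data = dict(scalar=3.14,
                text=u'hello',
                array=np.arange(5),
                nested=dict(count=7, items=[1, 2, 3]))
    # overwrite=True replaces an existing file; title names the top-level group
    write_hdf5(fname, data, overwrite=True, title='h5io')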
def _triage_write(key, value, root, comp_kw, where,
cleanup_data=[], slash='error', title=None):
if key != title and '/' in key:
if slash == 'error':
raise ValueError('Found a key with "/", '
'this is not allowed if slash == error')
elif slash == 'replace':
# Auto-replace keys with proper values
for key_spec, val_spec in special_chars.items():
key = key.replace(val_spec, key_spec)
else:
            raise ValueError("slash must be one of ['error', 'replace']")
if isinstance(value, dict):
sub_root = _create_titled_group(root, key, 'dict')
for key, sub_value in value.items():
if not isinstance(key, string_types):
raise TypeError('All dict keys must be strings')
_triage_write(
'key_{0}'.format(key), sub_value, sub_root, comp_kw,
where + '["%s"]' % key, cleanup_data=cleanup_data, slash=slash)
elif isinstance(value, (list, tuple)):
title = 'list' if isinstance(value, list) else 'tuple'
sub_root = _create_titled_group(root, key, title)
for vi, sub_value in enumerate(value):
_triage_write(
'idx_{0}'.format(vi), sub_value, sub_root, comp_kw,
where + '[%s]' % vi, cleanup_data=cleanup_data, slash=slash)
elif isinstance(value, type(None)):
_create_titled_dataset(root, key, 'None', [False])
elif isinstance(value, (int, float)):
if isinstance(value, int):
title = 'int'
else: # isinstance(value, float):
title = 'float'
_create_titled_dataset(root, key, title, np.atleast_1d(value))
elif isinstance(value, np.bool_):
_create_titled_dataset(root, key, 'np_bool_', np.atleast_1d(value))
elif isinstance(value, string_types):
if isinstance(value, text_type): # unicode
value = np.fromstring(value.encode('utf-8'), np.uint8)
title = 'unicode'
else:
value = np.fromstring(value.encode('ASCII'), np.uint8)
title = 'ascii'
_create_titled_dataset(root, key, title, value, comp_kw)
elif isinstance(value, np.ndarray):
_create_titled_dataset(root, key, 'ndarray', value)
elif sparse is not None and isinstance(value, sparse.csc_matrix):
sub_root = _create_titled_group(root, key, 'csc_matrix')
_triage_write('data', value.data, sub_root, comp_kw,
where + '.csc_matrix_data', cleanup_data=cleanup_data,
slash=slash)
_triage_write('indices', value.indices, sub_root, comp_kw,
where + '.csc_matrix_indices', cleanup_data=cleanup_data,
slash=slash)
_triage_write('indptr', value.indptr, sub_root, comp_kw,
where + '.csc_matrix_indptr', cleanup_data=cleanup_data,
slash=slash)
elif sparse is not None and isinstance(value, sparse.csr_matrix):
sub_root = _create_titled_group(root, key, 'csr_matrix')
_triage_write('data', value.data, sub_root, comp_kw,
where + '.csr_matrix_data', cleanup_data=cleanup_data,
slash=slash)
_triage_write('indices', value.indices, sub_root, comp_kw,
where + '.csr_matrix_indices', cleanup_data=cleanup_data,
slash=slash)
_triage_write('indptr', value.indptr, sub_root, comp_kw,
where + '.csr_matrix_indptr', cleanup_data=cleanup_data,
slash=slash)
_triage_write('shape', value.shape, sub_root, comp_kw,
where + '.csr_matrix_shape', cleanup_data=cleanup_data,
slash=slash)
else:
try:
from pandas import DataFrame, Series
except ImportError:
pass
else:
if isinstance(value, (DataFrame, Series)):
if isinstance(value, DataFrame):
title = 'pd_dataframe'
else:
title = 'pd_series'
rootname = root.name
cleanup_data.append({title: (rootname, key, value)})
return
err_str = 'unsupported type %s (in %s)' % (type(value), where)
raise TypeError(err_str)
##############################################################################
# READING
def read_hdf5(fname, title='h5io', slash='ignore'):
"""Read python object from HDF5 format using h5py
Parameters
----------
fname : str
File to load.
title : str
The top-level directory name to use. Typically it is useful to make
this your package name, e.g. ``'mnepython'``.
slash : 'ignore' | 'replace'
Whether to replace the string {FWDSLASH} with the value /. This does
not apply to the top level name (title). If 'ignore', nothing will be
replaced.
Returns
-------
data : object
The loaded data. Can be of any type supported by ``write_hdf5``.
"""
h5py = _check_h5py()
if not op.isfile(fname):
raise IOError('file "%s" not found' % fname)
if not isinstance(title, string_types):
raise ValueError('title must be a string')
with h5py.File(fname, mode='r') as fid:
if title not in fid:
raise ValueError('no "%s" data found' % title)
if isinstance(fid[title], h5py.Group):
if 'TITLE' not in fid[title].attrs:
raise ValueError('no "%s" data found' % title)
data = _triage_read(fid[title], slash=slash)
return data
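# Illustrative usage sketch (not part of the original module): reading back an object
# stored by write_hdf5 under the default 'h5io' title. The helper name and file name
# are made up; the function is never called at import time.
def _demo_read_hdf5(fname='h5io_demo.hdf5'):
    """Load the object stored under the default 'h5io' title."""
    data = read_hdf5(fname, title='h5io')
    # dicts, lists, tuples, scalars and ndarrays come back with their original types
    return data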
def _triage_read(node, slash='ignore'):
if slash not in ['ignore', 'replace']:
raise ValueError("slash must be one of 'replace', 'ignore'")
h5py = _check_h5py()
type_str = node.attrs['TITLE']
if isinstance(type_str, bytes):
type_str = type_str.decode()
if isinstance(node, h5py.Group):
if type_str == 'dict':
data = dict()
for key, subnode in node.items():
if slash == 'replace':
for key_spec, val_spec in special_chars.items():
key = key.replace(key_spec, val_spec)
data[key[4:]] = _triage_read(subnode, slash=slash)
elif type_str in ['list', 'tuple']:
data = list()
ii = 0
while True:
subnode = node.get('idx_{0}'.format(ii), None)
if subnode is None:
break
data.append(_triage_read(subnode, slash=slash))
ii += 1
assert len(data) == ii
data = tuple(data) if type_str == 'tuple' else data
return data
elif type_str == 'csc_matrix':
if sparse is None:
raise RuntimeError('scipy must be installed to read this data')
data = sparse.csc_matrix((_triage_read(node['data'], slash=slash),
_triage_read(node['indices'],
slash=slash),
_triage_read(node['indptr'],
slash=slash)))
elif type_str == 'csr_matrix':
if sparse is None:
raise RuntimeError('scipy must be installed to read this data')
data = sparse.csr_matrix((_triage_read(node['data'], slash=slash),
_triage_read(node['indices'],
slash=slash),
_triage_read(node['indptr'],
slash=slash)),
shape=_triage_read(node['shape']))
elif type_str in ['pd_dataframe', 'pd_series']:
from pandas import read_hdf
rootname = node.name
filename = node.file.filename
data = read_hdf(filename, rootname, mode='r')
else:
raise NotImplementedError('Unknown group type: {0}'
''.format(type_str))
elif type_str == 'ndarray':
data = np.array(node)
elif type_str in ('int', 'float'):
cast = int if type_str == 'int' else float
data = cast(np.array(node)[0])
elif type_str == 'np_bool_':
data = np.bool_(np.array(node)[0])
elif type_str in ('unicode', 'ascii', 'str'): # 'str' for backward compat
decoder = 'utf-8' if type_str == 'unicode' else 'ASCII'
cast = text_type if type_str == 'unicode' else str
data = cast(np.array(node).tostring().decode(decoder))
elif type_str == 'None':
data = None
else:
raise TypeError('Unknown node type: {0}'.format(type_str))
return data
# ############################################################################
# UTILITIES
def _sort_keys(x):
"""Sort and return keys of dict"""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
def object_diff(a, b, pre=''):
"""Compute all differences between two python variables
Parameters
----------
a : object
Currently supported: dict, list, tuple, ndarray, int, str, bytes,
float.
b : object
        Must be the same type as ``a``.
pre : str
String to prepend to each line.
Returns
-------
diffs : str
A string representation of the differences.
"""
try:
from pandas import DataFrame, Series
except ImportError:
DataFrame = Series = type(None)
out = ''
if type(a) != type(b):
out += pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
elif isinstance(a, dict):
k1s = _sort_keys(a)
k2s = _sort_keys(b)
m1 = set(k2s) - set(k1s)
if len(m1):
out += pre + ' x1 missing keys %s\n' % (m1)
for key in k1s:
if key not in k2s:
out += pre + ' x2 missing key %s\n' % key
else:
out += object_diff(a[key], b[key], pre + 'd1[%s]' % repr(key))
elif isinstance(a, (list, tuple)):
if len(a) != len(b):
out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
else:
for xx1, xx2 in zip(a, b):
out += object_diff(xx1, xx2, pre='')
elif isinstance(a, (string_types, int, float, bytes)):
if a != b:
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif a is None:
pass # b must be None due to our type checking
elif isinstance(a, np.ndarray):
if not np.array_equal(a, b):
out += pre + ' array mismatch\n'
elif sparse is not None and sparse.isspmatrix(a):
# sparsity and sparse type of b vs a already checked above by type()
if b.shape != a.shape:
out += pre + (' sparse matrix a and b shape mismatch'
'(%s vs %s)' % (a.shape, b.shape))
else:
c = a - b
c.eliminate_zeros()
if c.nnz > 0:
out += pre + (' sparse matrix a and b differ on %s '
'elements' % c.nnz)
elif isinstance(a, (DataFrame, Series)):
if b.shape != a.shape:
out += pre + (' pandas values a and b shape mismatch'
'(%s vs %s)' % (a.shape, b.shape))
else:
c = a.values - b.values
nzeros = np.sum(c != 0)
if nzeros > 0:
out += pre + (' pandas values a and b differ on %s '
'elements' % nzeros)
else:
raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
return out
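# Illustrative usage sketch (not part of the original module): object_diff returns an
# empty string when the objects match and a human-readable report otherwise. The helper
# name is made up; the function is never called at import time.
def _demo_object_diff():
    a = dict(x=np.arange(3), y='same')
    b = dict(x=np.arange(3), y='changed')
    # yields a report containing a line like "d1['y'] value mismatch (same, changed)"
    return object_diff(a, b)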
class _TempDir(str):
"""Class for creating and auto-destroying temp dir
This is designed to be used with testing modules. Instances should be
defined inside test functions. Instances defined at module level can not
guarantee proper destruction of the temporary directory.
When used at module level, the current use of the __del__() method for
cleanup can fail because the rmtree function may be cleaned up before this
object (an alternative could be using the atexit module instead).
"""
def __new__(self):
new = str.__new__(self, tempfile.mkdtemp())
return new
def __init__(self):
self._path = self.__str__()
def __del__(self):
rmtree(self._path, ignore_errors=True)
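# Illustrative usage sketch (not part of the original module): _TempDir combined with
# write_hdf5/read_hdf5 for a round trip, as one would do inside a test function. The
# helper name is made up; the function is never called at import time.
def _demo_temp_dir_round_trip():
    tempdir = _TempDir()  # removed automatically when the object is garbage collected
    fname = op.join(tempdir, 'demo.hdf5')
    write_hdf5(fname, dict(a=np.arange(4)), title='h5io')
    return read_hdf5(fname, title='h5io')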
|
bsd-3-clause
|
acarmel/dreampie
|
dreampielib/gui/__init__.py
|
1
|
61723
|
# Copyright 2010 Noam Yorav-Raphael
#
# This file is part of DreamPie.
#
# DreamPie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DreamPie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DreamPie. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
from os import path
import time
import tempfile
from optparse import OptionParser
import subprocess
import webbrowser
import re
from keyword import iskeyword
import logging
from logging import debug
#logging.basicConfig(format="dreampie: %(message)s", level=logging.DEBUG)
def find_data_dir():
"""
Find the data directory in which to find files.
If we are inside the source directory, build subp zips.
"""
# The data directory is normally located at dreampielib/data.
# When running under py2exe, it is in the same directory as the executable.
# Running inside the source directory is detected by the presence of a
# file called 'dreampie' in the same directory as 'dreampielib'.
from os.path import join, dirname, abspath, isfile
if hasattr(sys, 'frozen'):
return abspath(join(dirname(sys.executable), 'data'))
dreampielib_dir = dirname(dirname(abspath(__file__)))
if isfile(join(dirname(dreampielib_dir), 'dreampie')):
# We're in the source path. Build zips if needed, and return the right
# dir.
from ..subp_lib import build
build()
return join(dreampielib_dir, 'data')
data_dir = find_data_dir()
gladefile = path.join(data_dir, 'dreampie.glade')
def load_pygtk():
"""On win32, load PyGTK from subdirectory, if available."""
from os.path import join, dirname, abspath
if hasattr(sys, 'frozen'):
pygtk_dir = join(dirname(abspath(sys.executable)), 'gtk-2.0')
else:
pygtk_dir = join(dirname(dirname(dirname(abspath(__file__)))), 'gtk-2.0')
if os.path.isdir(pygtk_dir):
sys.path.insert(0, pygtk_dir)
import runtime #@UnresolvedImport
if sys.platform == 'win32':
load_pygtk()
import gobject
gobject.threads_init() #@UndefinedVariable
import gtk
from gtk import gdk, glade
import pango
import gtksourceview2
from . import gtkexcepthook
gtkexcepthook.install(gladefile)
try:
from glib import timeout_add, idle_add
except ImportError:
# In PyGObject 2.14, it's in gobject.
from gobject import timeout_add, idle_add
from .. import __version__
from .SimpleGladeApp import SimpleGladeApp
from .keyhandler import (make_keyhandler_decorator, handle_keypress,
parse_keypress_event)
from .config import Config
from .config_dialog import ConfigDialog
from .write_command import write_command
from .newline_and_indent import newline_and_indent
from .output import Output
from .folding import Folding
from .selection import Selection
from .status_bar import StatusBar
from .vadj_to_bottom import VAdjToBottom
from .history import History
from .hist_persist import HistPersist
from .autocomplete import Autocomplete
from .call_tips import CallTips
from .autoparen import Autoparen
from .crash_workaround import TextViewCrashWorkaround
from .subprocess_handler import SubprocessHandler, StartError
from .common import beep, get_text, TimeoutError
from .file_dialogs import save_dialog
from .tags import (OUTPUT, STDIN, STDOUT, STDERR, EXCEPTION, PROMPT, COMMAND,
COMMAND_DEFS, COMMAND_SEP, MESSAGE, RESULT_IND, RESULT, CURRENT_TOKEN)
from . import tags
from .update_check import update_check
from . import bug_report
INDENT_WIDTH = 4
# Default line length, by which we set the default window size
LINE_LEN = 80
# Time to wait before autocompleting, to see if the user continues to type
AUTOCOMPLETE_WAIT = 400
# Time to wait for the subprocess to return a result. The subprocess may be busy
# doing idle jobs, and so may not answer immediately.
SUBP_WAIT_TIMEOUT_S = .5
# Maybe someday we'll want translations...
_ = lambda s: s
# A decorator for managing sourceview key handlers
sourceview_keyhandlers = {}
sourceview_keyhandler = make_keyhandler_decorator(sourceview_keyhandlers)
def get_widget(name):
"""Create a widget from the glade file."""
xml = glade.XML(gladefile, name)
return xml.get_widget(name)
class DreamPie(SimpleGladeApp):
def __init__(self, pyexec, runfile):
"""
pyexec - the Python interpreter executable
runfile - a filename to run upon startup, or None.
"""
SimpleGladeApp.__init__(self, gladefile, 'window_main')
self.load_popup_menus()
self.set_mac_accelerators()
self.config = Config()
if self.config.get_bool('start-rpdb2-embedded'):
print 'Starting rpdb2 embedded debugger...',
sys.stdout.flush()
import rpdb2; rpdb2.start_embedded_debugger('1234', timeout=0.1)
print 'Done.'
self.window_main.set_icon_from_file(
path.join(data_dir, 'dreampie.png'))
self.textbuffer = tb = self.textview.get_buffer()
self.init_textbufferview()
# Mark where the cursor was when the popup menu was popped
self.popup_mark = tb.create_mark('popup-mark', tb.get_start_iter(),
left_gravity=True)
# Remove the page in the notebook, which was added because empty
# notebooks cause warnings
self.notebook.remove_page(0)
# A list of callbacks to call when changing the sourcebuffer
self.sv_changed = []
self.sourceview = self.create_sourcebufferview()
self.sourcebuffer = self.sourceview.get_buffer()
# A tuple (page_num, text) of the recently closed tab
self.reopen_tab_data = None
# last (font, vertical_layout) configured. If they are changed,
# configure() will resize the window and place the paned.
self.last_configured_layout = (None, None)
self.configure()
self.output = Output(self.textview)
self.folding = Folding(self.textbuffer, LINE_LEN)
self.selection = Selection(self.textview, self.sourceview,
self.sv_changed,
self.on_is_something_selected_changed)
self.status_bar = StatusBar(self.sourcebuffer, self.sv_changed,
self.statusbar)
self.vadj_to_bottom = VAdjToBottom(self.scrolledwindow_textview
.get_vadjustment())
self.history = History(self.textview, self.sourceview, self.sv_changed,
self.config)
self.recent_manager = gtk.recent_manager_get_default()
self.menuitem_recent = [self.menuitem_recent0, self.menuitem_recent1,
self.menuitem_recent2, self.menuitem_recent3]
self.recent_filenames = [None] * len(self.menuitem_recent)
self.recent_manager.connect('changed', self.on_recent_manager_changed)
self.histpersist = HistPersist(self.window_main, self.textview,
self.status_bar, self.recent_manager)
self.update_recent()
self.autocomplete = Autocomplete(self.sourceview,
self.sv_changed,
self.window_main,
self.complete_attributes,
self.complete_firstlevels,
self.get_func_args,
self.find_modules,
self.get_module_members,
self.complete_filenames,
self.complete_dict_keys,
INDENT_WIDTH)
# Hack: we connect this signal here, so that it will have lower
# priority than the key-press event of autocomplete, when active.
self.sourceview_keypress_handler = self.sourceview.connect(
'key-press-event', self.on_sourceview_keypress)
self.sv_changed.append(self.on_sv_changed)
self.call_tips = CallTips(self.sourceview, self.sv_changed,
self.window_main, self.get_func_doc,
INDENT_WIDTH)
self.autoparen = Autoparen(self.sourcebuffer, self.sv_changed,
self.is_callable_only,
self.get_expects_str,
self.autoparen_show_call_tip,
INDENT_WIDTH)
self.subp = SubprocessHandler(
pyexec, data_dir,
self.on_stdout_recv, self.on_stderr_recv, self.on_object_recv,
self.on_subp_terminated)
        # Number of RPC calls that timed out and are still expecting results
self._n_unclaimed_results = 0
try:
self.subp.start()
except StartError, e:
msg = gtk.MessageDialog(
None, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE,
_("Couldn't start subprocess: %s") % e)
_response = msg.run()
msg.destroy()
print >> sys.stderr, e
sys.exit(1)
# Is the subprocess executing a command
self.set_is_executing(False)
# Are we trying to shut down
self.is_terminating = False
self.window_main.show()
self.subp_welcome, self.subp_can_mask_sigint = (
self.call_subp(u'get_subprocess_info'))
self.show_welcome()
self.configure_subp()
self.run_init_code(runfile)
bug_report.set_subp_info(pyexec, self.subp_welcome)
if self.config.get_bool('show-getting-started'):
self.show_getting_started_dialog()
self.config.set_bool('show-getting-started', False)
self.config.save()
update_check(self.on_update_available)
def on_sv_changed(self, new_sv):
self.sourceview.disconnect(self.sourceview_keypress_handler)
self.sourceview = new_sv
self.sourcebuffer = new_sv.get_buffer()
self.sourceview_keypress_handler = self.sourceview.connect(
'key-press-event', self.on_sourceview_keypress)
def load_popup_menus(self):
# Load popup menus from the glade file. Would not have been needed if
# popup menus could be children of windows.
xml = glade.XML(gladefile, 'popup_sel_menu')
xml.signal_autoconnect(self)
self.popup_sel_menu = xml.get_widget('popup_sel_menu')
xml = glade.XML(gladefile, 'popup_nosel_menu')
xml.signal_autoconnect(self)
self.popup_nosel_menu = xml.get_widget('popup_nosel_menu')
self.fold_unfold_section_menu = xml.get_widget('fold_unfold_section_menu')
self.copy_section_menu = xml.get_widget('copy_section_menu')
self.view_section_menu = xml.get_widget('view_section_menu')
self.save_section_menu = xml.get_widget('save_section_menu')
def set_mac_accelerators(self):
# Set up accelerators suitable for the Mac.
# Ctrl-Up and Ctrl-Down are taken by the window manager, so we use
# Ctrl-PgUp and Ctrl-PgDn.
# We want it to be easy to switch, so both sets of keys are always
# active, but only one, most suitable for each platform, is displayed
# in the menu.
accel_group = gtk.accel_groups_from_object(self.window_main)[0]
menu_up = self.menuitem_history_up
UP = gdk.keyval_from_name('Up')
PGUP = gdk.keyval_from_name('Prior')
menu_dn = self.menuitem_history_down
DN = gdk.keyval_from_name('Down')
PGDN = gdk.keyval_from_name('Next')
if sys.platform != 'darwin':
menu_up.add_accelerator('activate', accel_group, PGUP,
gdk.CONTROL_MASK, 0)
menu_dn.add_accelerator('activate', accel_group, PGDN,
gdk.CONTROL_MASK, 0)
else:
menu_up.remove_accelerator(accel_group, UP, gdk.CONTROL_MASK)
menu_up.add_accelerator('activate', accel_group, PGUP,
gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_up.add_accelerator('activate', accel_group, UP,
gdk.CONTROL_MASK, 0)
menu_dn.remove_accelerator(accel_group, DN, gdk.CONTROL_MASK)
menu_dn.add_accelerator('activate', accel_group, PGDN,
gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_dn.add_accelerator('activate', accel_group, DN,
gdk.CONTROL_MASK, 0)
def on_cut(self, _widget):
return self.selection.cut()
def on_copy(self, _widget):
return self.selection.copy()
def on_copy_commands_only(self, _widget):
return self.selection.copy_commands_only()
def on_save_commands_only(self, _widget):
"""Prompts for a filename and saves only the selected commands
to the file"""
def save_code(filename):
self.selection.save_commands_only(filename)
save_dialog(save_code, _("Choose where to save the code"),
self.main_widget, _("Python Files"), "*.py", None)
def on_paste(self, _widget):
return self.selection.paste()
def on_upward_find(self, _widget):
self.find(is_upward=True)
def on_downward_find(self, _widget):
self.find(is_upward=False)
def find(self, is_upward):
tb = self.textbuffer
sb = self.sourcebuffer
search_str = get_text(sb, sb.get_start_iter(), sb.get_end_iter())
if not search_str:
self.status_bar.set_status(_(
"Type the text you want to search for in the code box, and "
"press Ctrl-F"))
beep()
return
if tb.get_has_selection():
sel_start, sel_end = tb.get_selection_bounds()
it = sel_start if is_upward else sel_end
elif self.textview.has_focus():
it = tb.get_iter_at_mark(tb.get_insert())
else:
it = tb.get_end_iter()
flags = gtk.TEXT_SEARCH_VISIBLE_ONLY
if is_upward:
match = it.backward_search(search_str, flags)
if match is None:
match = tb.get_end_iter().backward_search(search_str, flags)
else:
match = it.forward_search(search_str, flags)
if match is None:
match = tb.get_start_iter().forward_search(search_str, flags)
if match is None:
beep()
else:
start, end = match
tb.select_range(start, end)
self.textview.scroll_to_iter(start, 0)
def on_is_something_selected_changed(self, is_something_selected):
self.menuitem_cut.props.sensitive = is_something_selected
self.menuitem_copy.props.sensitive = is_something_selected
self.menuitem_copy_commands_only.props.sensitive = is_something_selected
self.menuitem_save_commands_only.props.sensitive = is_something_selected
self.menuitem_interrupt.props.sensitive = not is_something_selected
# Source buffer, Text buffer
def init_textbufferview(self):
tv = self.textview
tb = self.textbuffer
tv.set_wrap_mode(gtk.WRAP_CHAR)
self.textview_crash_workaround = TextViewCrashWorkaround(tv)
tags.add_tags(tb)
tv.connect('key-press-event', self.on_textview_keypress)
tv.connect('focus-in-event', self.on_textview_focus_in)
def get_char_width_height(self):
tv = self.textview
context = tv.get_pango_context()
metrics = context.get_metrics(tv.style.font_desc,
context.get_language())
charwidth = pango.PIXELS(metrics.get_approximate_digit_width())
# I don't know why +1
charheight = pango.PIXELS(metrics.get_ascent() + metrics.get_descent())+1
return charwidth, charheight
def set_window_size(self, vertical_layout):
charwidth, charheight = self.get_char_width_height()
if vertical_layout:
# I don't know why I have to add 2, but it works.
width = charwidth*(LINE_LEN+2)
height = charheight*30
else:
width = charwidth*((LINE_LEN-10)*2+2)
height = charheight*26
self.window_main.resize(width, height)
# Set the position of the paned. We wait until it is exposed because
# then its max_position is meaningful.
# In vertical layout we set it to maximum, since the sourceview has
# a minimum height.
def callback(_widget, _event):
if vertical_layout:
pane = self.vpaned_main
pane.set_position(pane.props.max_position)
else:
pane = self.hpaned_main
pane.set_position(pane.props.max_position // 2)
self.sourceview.disconnect(callback_id)
callback_id = self.sourceview.connect('expose-event', callback)
def create_sourcebufferview(self, page_num=None):
sb = gtksourceview2.Buffer()
sb.connect('notify::cursor-position', self.on_sourceview_move_cursor)
sb.create_tag(CURRENT_TOKEN, background='dark blue')
sv = gtksourceview2.View(sb)
sv.show()
sv.connect('focus-in-event', self.on_sourceview_focus_in)
sv.connect('button-press-event', self.on_sourceview_button_press_event)
_charwidth, charheight = self.get_char_width_height()
self.configure_sourceview(sv)
lm = gtksourceview2.LanguageManager()
lm.set_search_path([path.join(data_dir, 'language-specs')])
sb.set_language(lm.get_language('python'))
scroll = gtk.ScrolledWindow()
scroll.show()
scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scroll.add(sv)
scroll.set_size_request(-1, charheight * 4)
lbl = gtk.Label(' ')
if page_num is None:
page_num = self.notebook.get_current_page() + 1
self.notebook.insert_page(scroll, lbl, page_num)
self.notebook.set_current_page(page_num)
sv.grab_focus()
return sv
def sv_scroll_cursor_onscreen(self):
self.sourceview.scroll_mark_onscreen(self.sourcebuffer.get_insert())
def on_textview_focus_in(self, _widget, _event):
# Clear the selection of the sourcebuffer
self.sourcebuffer.move_mark(self.sourcebuffer.get_selection_bound(),
self.sourcebuffer.get_iter_at_mark(
self.sourcebuffer.get_insert()))
def on_sourceview_focus_in(self, _widget, _event):
# Clear the selection of the textbuffer
self.textbuffer.move_mark(self.textbuffer.get_selection_bound(),
self.textbuffer.get_iter_at_mark(
self.textbuffer.get_insert()))
def on_sourceview_button_press_event(self, _widget, event):
if event.button == 2 and self.textbuffer.get_has_selection():
commands = self.selection.get_commands_only()
self.sourcebuffer.insert_interactive_at_cursor(commands, True)
return True
def on_sourceview_move_cursor(self, buffer, data=None):
if not buffer.props.has_selection:
sb = self.sourcebuffer
text = get_text(sb, sb.get_start_iter(), sb.get_end_iter())
index = sb.get_iter_at_mark(sb.get_insert()).get_offset()
sb.remove_tag_by_name(CURRENT_TOKEN, sb.get_start_iter(), sb.get_end_iter())
tokens = list(re.finditer('\w+', text))
current_token = None
for match in tokens:
if match.start() <= index <= match.end():
current_token = text[match.start():match.end()]
break
if sum(1 for match in tokens if text[match.start():match.end()] == current_token) > 1:
for match in tokens:
                    if text[match.start():match.end()] == current_token:
                        sb.apply_tag_by_name(CURRENT_TOKEN,
                                             sb.get_iter_at_offset(match.start()),
                                             sb.get_iter_at_offset(match.end()))
def write(self, data, *tag_names):
self.textbuffer.insert_with_tags_by_name(
self.textbuffer.get_end_iter(), data, *tag_names)
def write_output(self, data, tag_names, onnewline=False, addbreaks=True):
"""
Call self.output.write with the given arguments, and autofold if needed.
"""
it = self.output.write(data, tag_names, onnewline, addbreaks)
if self.config.get_bool('autofold'):
self.folding.autofold(it, self.config.get_int('autofold-numlines'))
def set_is_executing(self, is_executing):
self.is_executing = is_executing
label = _(u'Execute Code') if not is_executing else _(u'Write Input')
self.menuitem_execute.child.props.label = label
self.menuitem_discard_hist.props.sensitive = not is_executing
@staticmethod
def replace_gtk_quotes(source):
# Work around GTK+ bug https://bugzilla.gnome.org/show_bug.cgi?id=610928
# in order to fix bug #525469 - replace fancy quotes with regular
# quotes.
return source.replace(u'\xa8', '"').replace(u'\xb4', "'")
def execute_source(self):
"""Execute the source in the source buffer.
"""
sb = self.sourcebuffer
source = get_text(sb, sb.get_start_iter(), sb.get_end_iter())
source = source.rstrip()
source = self.replace_gtk_quotes(source)
try:
# There's a chance that the subprocess won't reply, because it's
# busy doing "idle jobs". For most queries we can ask for an answer
# and if it doesn't arrive, cancel what we tried to do and ignore
# the answer when it comes. However, here we can't let the execute
# function run and ignore the result, so we first call 'pause_idle'.
# If we don't get a reply for pause_idle, we don't execute.
self.call_subp_noblock(u'pause_idle')
except TimeoutError:
self.subp.send_object((u'resume_idle', ()))
self._n_unclaimed_results += 1
self.status_bar.set_status(_("The subprocess is currently busy"))
beep()
return
is_ok, syntax_error_info = self.call_subp(u'execute', source)
if not is_ok:
if syntax_error_info:
msg, lineno, offset = syntax_error_info
status_msg = _("Syntax error: %s (at line %d col %d)") % (
msg, lineno+1, offset+1)
# Work around a bug: offset may be wrong, which will cause
# gtk to crash if using sb.get_iter_at_line_offset.
iter = sb.get_iter_at_line(lineno)
iter.forward_chars(offset+1)
sb.place_cursor(iter)
else:
# Incomplete
status_msg = _("Command is incomplete")
sb.place_cursor(sb.get_end_iter())
self.status_bar.set_status(status_msg)
beep()
else:
self.set_is_executing(True)
write_command(self.write, source.strip())
self.output.start_new_section()
if not self.config.get_bool('leave-code'):
sb.delete(sb.get_start_iter(), sb.get_end_iter())
self.vadj_to_bottom.scroll_to_bottom()
def send_stdin(self):
"""Send the contents of the sourcebuffer as stdin."""
sb = self.sourcebuffer
s = get_text(sb, sb.get_start_iter(), sb.get_end_iter())
if not s.endswith('\n'):
s += '\n'
self.write_output(s, [COMMAND, STDIN], addbreaks=False)
self.write('\r', COMMAND_SEP)
self.output.start_new_section()
self.vadj_to_bottom.scroll_to_bottom()
if not self.config.get_bool('leave-code'):
sb.delete(sb.get_start_iter(), sb.get_end_iter())
self.subp.write(s)
@sourceview_keyhandler('Return', 0)
def on_sourceview_return(self):
sb = self.sourcebuffer
# If we are on the first line, and it doesn't end with a ' ':
# * If we are not executing, try to execute (if failed, continue
# with normal behavior)
# * If we are executing, send the line as stdin.
insert_iter = sb.get_iter_at_mark(sb.get_insert())
if (insert_iter.equal(sb.get_end_iter())
and insert_iter.get_line() == 0
and insert_iter.get_offset() != 0
and not get_text(sb, sb.get_start_iter(),
insert_iter).endswith(' ')):
if not self.is_executing:
source = get_text(sb, sb.get_start_iter(), sb.get_end_iter())
source = source.rstrip()
source = self.replace_gtk_quotes(source)
try:
is_incomplete = self.call_subp_noblock(u'is_incomplete', source)
except TimeoutError:
is_incomplete = True
if not is_incomplete:
self.execute_source()
return True
else:
# is_executing
self.send_stdin()
return True
# If we are after too many newlines, the user probably just wanted to
# execute - notify him.
# We check if this line is empty and the previous one is.
show_execution_tip = False
if insert_iter.equal(sb.get_end_iter()):
it = sb.get_end_iter()
# This goes to the beginning of the line, and another line
# backwards, so we get two lines
it.backward_lines(1)
text = get_text(sb, it, sb.get_end_iter())
if not text.strip():
show_execution_tip = True
# We didn't execute, so newline-and-indent.
r = newline_and_indent(self.sourceview, INDENT_WIDTH)
if show_execution_tip:
self.status_bar.set_status(_(
"Tip: To execute your code, use Ctrl+Enter."))
return r
@sourceview_keyhandler('KP_Enter', 0)
def on_sourceview_kp_enter(self):
self.on_execute_command(None)
return True
@sourceview_keyhandler('Tab', 0)
def on_sourceview_tab(self):
sb = self.sourcebuffer
sel = sb.get_selection_bounds()
if not sel:
insert = sb.get_iter_at_mark(sb.get_insert())
insert_linestart = sb.get_iter_at_line(insert.get_line())
line = get_text(sb, insert_linestart, insert)
if not line.strip():
# We are at the beginning of a line, so indent - forward to next
# "tab stop"
sb.insert_at_cursor(' '*(INDENT_WIDTH - len(line)%INDENT_WIDTH))
else:
# Completion should come here
self.autocomplete.show_completions(is_auto=False, complete=True)
else:
# Indent
start, end = sel
start = sb.get_iter_at_line(start.get_line())
if not end.ends_line():
end.forward_to_line_end()
text = get_text(sb, start, end)
newtext = '\n'.join(' '+line for line in text.split('\n'))
start_offset = start.get_offset()
sb.delete(start, end)
sb.insert(end, newtext)
sb.select_range(sb.get_iter_at_offset(start_offset), end)
self.sv_scroll_cursor_onscreen()
return True
@sourceview_keyhandler('ISO_Left_Tab', 0)
def on_sourceview_shift_tab(self):
sb = self.sourcebuffer
sel = sb.get_selection_bounds()
if sel:
start, end = sel
else:
start = end = sb.get_iter_at_mark(sb.get_insert())
start = sb.get_iter_at_line(start.get_line())
if not end.ends_line():
end.forward_to_line_end()
text = get_text(sb, start, end)
lines = text.split('\n')
if not all(line.startswith(' ')
for line in lines if line.strip() != ''):
beep()
else:
newlines = [line[4:] for line in lines]
newtext = '\n'.join(newlines)
start_offset = start.get_offset()
sb.delete(start, end)
sb.insert(end, newtext)
sb.select_range(sb.get_iter_at_offset(start_offset), end)
return True
@sourceview_keyhandler('Home', 0)
def on_sourceview_home(self):
# If the cursor is already at the beginning of the line, move to the
# beginning of the text.
sb = self.sourcebuffer
insert = sb.get_iter_at_mark(sb.get_insert())
if insert.starts_line():
while insert.get_char() == ' ':
insert.forward_char()
sb.place_cursor(insert)
return True
@sourceview_keyhandler('BackSpace', 0)
def on_sourceview_backspace(self):
sb = self.sourcebuffer
insert = sb.get_iter_at_mark(sb.get_insert())
insert_linestart = sb.get_iter_at_line(insert.get_line())
line = get_text(sb, insert_linestart, insert)
if line and not line.strip():
            # There are only spaces before us, so remove spaces up to the last
# "tab stop"
delete_from = ((len(line) - 1) // INDENT_WIDTH) * INDENT_WIDTH
it = sb.get_iter_at_line_offset(insert.get_line(), delete_from)
sb.delete(it, insert)
self.sv_scroll_cursor_onscreen()
return True
return False
    # The following handlers are for characters which may trigger automatic
# opening of the completion list. (slash and backslash depend on path.sep)
# We leave the final decision whether to open the list to the autocompleter.
# We just notify it that the char was inserted and the user waited a while.
@sourceview_keyhandler('period', 0)
def on_sourceview_period(self):
timeout_add(AUTOCOMPLETE_WAIT, self.check_autocomplete, '.')
@sourceview_keyhandler('slash', 0)
def on_sourceview_slash(self):
timeout_add(AUTOCOMPLETE_WAIT, self.check_autocomplete, '/')
@sourceview_keyhandler('backslash', 0)
def on_sourceview_backslash(self):
timeout_add(AUTOCOMPLETE_WAIT, self.check_autocomplete, '\\')
@sourceview_keyhandler('bracketleft', 0)
def on_sourceview_bracketleft(self):
timeout_add(AUTOCOMPLETE_WAIT, self.check_autocomplete, '[')
def check_autocomplete(self, last_char):
"""
If the last char in the sourcebuffer is last_char, call
show_completions.
"""
sb = self.sourcebuffer
if self.sourceview.is_focus():
it = sb.get_iter_at_mark(sb.get_insert())
it2 = it.copy()
it2.backward_chars(1)
char = get_text(sb, it2, it)
if char == last_char:
self.autocomplete.show_completions(is_auto=True, complete=False)
# return False so as not to be called repeatedly.
return False
@sourceview_keyhandler('parenleft', 0)
def on_sourceview_parenleft(self):
idle_add(self.call_tips.show, True)
def on_sourceview_keypress(self, _widget, event):
return handle_keypress(self, event, sourceview_keyhandlers)
# Autoparen
@sourceview_keyhandler('space', 0)
def on_sourceview_space(self):
"""
If a space was hit after a callable-only object, add parentheses.
"""
if self.is_executing:
return False
if not self.config.get_bool('autoparen'):
return False
return self.autoparen.add_parens()
def is_callable_only(self, expr):
return self.call_subp_catch(u'is_callable_only', expr)
def get_expects_str(self):
return set(self.config.get('expects-str-2').split())
def autoparen_show_call_tip(self):
self.call_tips.show(is_auto=True)
# History
def on_textview_keypress(self, _widget, event):
keyval_name, state = parse_keypress_event(event)
if (keyval_name, state) in (('Return', 0), ('KP_Enter', 0)):
return self.history.copy_to_sourceview()
def on_history_up(self, _widget):
self.history.history_up()
def on_history_down(self, _widget):
self.history.history_down()
# Subprocess
def show_welcome(self):
s = self.subp_welcome + 'DreamPie %s\n' % __version__
self.write(s, MESSAGE)
self.output.start_new_section()
def configure_subp(self):
config = self.config
if config.get_bool('use-reshist'):
reshist_size = config.get_int('reshist-size')
else:
reshist_size = 0
self.call_subp(u'set_reshist_size', reshist_size)
self.menuitem_clear_reshist.props.sensitive = (reshist_size > 0)
self.call_subp(u'set_pprint', config.get_bool('pprint'))
self.call_subp(u'set_matplotlib_ia',
config.get_bool('matplotlib-ia-switch'),
config.get_bool('matplotlib-ia-warn'))
def run_init_code(self, runfile=None):
"""
Runs the init code.
This will result in the code being run and a '>>>' printed afterwards.
If there's no init code, will just print '>>>'.
If runfile is given, will also execute the code in that.
"""
init_code = unicode(eval(self.config.get('init-code')))
if runfile:
msg = "Running %s" % runfile
# This should be both valid py3 and py2 code.
init_code += ('\n\nprint(%r)\nexec(open(%r).read())\n'
% (msg, runfile))
if init_code:
is_ok, syntax_error_info = self.call_subp(u'execute', init_code)
if not is_ok:
msg, lineno, offset = syntax_error_info
warning = _(
"Could not run initialization code because of a syntax "
"error:\n"
"%s at line %d col %d.") % (msg, lineno+1, offset+1)
msg = gtk.MessageDialog(self.window_main, gtk.DIALOG_MODAL,
gtk.MESSAGE_WARNING, gtk.BUTTONS_CLOSE,
warning)
_response = msg.run()
msg.destroy()
else:
self.set_is_executing(True)
if not self.is_executing:
self.write('>>> ', COMMAND, PROMPT)
def on_subp_terminated(self):
if self.is_terminating:
return
# This may raise an exception if subprocess couldn't be started,
# but hopefully if it was started once it will be started again.
self._n_unclaimed_results = 0
self.subp.start()
self.set_is_executing(False)
self.write('\n')
self.write(
'==================== New Session ====================\n',
MESSAGE)
self.output.start_new_section()
self.configure_subp()
self.run_init_code()
self.vadj_to_bottom.scroll_to_bottom()
self.sourceview.grab_focus()
def on_restart_subprocess(self, _widget):
self.subp.kill()
def on_stdout_recv(self, data):
self.write_output(data, STDOUT)
def on_stderr_recv(self, data):
self.write_output(data, STDERR)
def call_subp(self, funcname, *args):
"""
Make an RPC call, blocking until an answer is received.
"""
assert not self.is_executing
while self._n_unclaimed_results:
self.subp.recv_object()
self.subp.send_object((funcname, args))
return self.subp.recv_object()
def call_subp_noblock(self, funcname, *args):
"""
Make a non-blocking RPC call.
        Will wait for SUBP_WAIT_TIMEOUT_S; if no answer is received, raise a
        TimeoutError. The query will still be executed when the subprocess
        becomes responsive again, but its result will be discarded.
"""
assert not self.is_executing
while self._n_unclaimed_results:
returned = self.subp.wait_for_object(SUBP_WAIT_TIMEOUT_S)
if returned:
self.subp.recv_object()
else:
raise TimeoutError
self.subp.send_object((funcname, args))
returned = self.subp.wait_for_object(SUBP_WAIT_TIMEOUT_S)
if returned:
return self.subp.recv_object()
else:
self._n_unclaimed_results += 1
raise TimeoutError
def call_subp_catch(self, funcname, *args):
"""
Make a non-blocking RPC call.
If executing, return None.
If a TimeoutError is raised, catch it and return None.
"""
if self.is_executing:
return None
try:
return self.call_subp_noblock(funcname, *args)
except TimeoutError:
return None
def on_object_recv(self, obj):
if self._n_unclaimed_results:
self._n_unclaimed_results -= 1
return
assert self.is_executing
is_success, val_no, val_str, exception_string, rem_stdin = obj
if not is_success:
self.write_output(exception_string, EXCEPTION, onnewline=True)
else:
if val_str is not None:
if val_no is not None:
sep = ' ' if '\n' not in val_str else '\n'
self.write_output('%d:%s' % (val_no, sep), RESULT_IND,
onnewline=True)
self.write_output(val_str+'\n', RESULT)
self.write('>>> ', COMMAND, PROMPT)
self.set_is_executing(False)
self.handle_rem_stdin(rem_stdin)
def handle_rem_stdin(self, rem_stdin):
"""
Add the stdin text that was not processed to the source buffer.
Remove it from the text buffer (we check that the STDIN text is
consistent with rem_stdin - otherwise we give up)
"""
if not rem_stdin:
return
self.sourcebuffer.insert(self.sourcebuffer.get_start_iter(), rem_stdin)
self.sv_scroll_cursor_onscreen()
tb = self.textbuffer
stdin = tb.get_tag_table().lookup(STDIN)
it = tb.get_end_iter()
if not it.ends_tag(stdin):
it.backward_to_tag_toggle(stdin)
while True:
it2 = it.copy()
it2.backward_to_tag_toggle(stdin)
cur_stdin = get_text(tb, it2, it, True)
min_len = min(len(cur_stdin), len(rem_stdin))
assert min_len > 0
if cur_stdin[-min_len:] != rem_stdin[-min_len:]:
debug("rem_stdin doesn't match what's in textview")
break
it2.forward_chars(len(cur_stdin)-min_len)
tb.delete(it2, it)
rem_stdin = rem_stdin[:-min_len]
if not rem_stdin:
break
else:
it = it2
# if rem_stdin is left, it2 must be at the beginning of the
# stdin region.
it2.backward_to_tag_toggle(stdin)
assert it2.ends_tag(stdin)
def on_execute_command(self, _widget):
if self.is_executing:
self.send_stdin()
elif self.sourcebuffer.get_char_count() == 0:
beep()
else:
self.execute_source()
return True
def on_interrupt(self, _widget):
if self.subp_can_mask_sigint or self.is_executing:
self.subp.interrupt()
else:
self.status_bar.set_status(
_("A command isn't being executed currently"))
beep()
# History persistence
def on_save_history(self, _widget):
self.histpersist.save()
def on_save_history_as(self, _widget):
self.histpersist.save_as()
def on_load_history(self, _widget):
self.histpersist.load()
# Recent history files
def on_recent_manager_changed(self, _recent_manager):
self.update_recent()
def update_recent(self):
"""Update the menu and self.recent_filenames"""
rman = self.recent_manager
recent_items = [it for it in rman.get_items()
if it.has_application('dreampie')
and it.get_uri().startswith('file://')]
# it.get_visited() makes more sense, but since we call RecentManager.add
# when we open and when we save, get_modified() does the trick.
recent_items.sort(key=lambda it: it.get_modified(),
reverse=True)
self.menuitem_recentsep.props.visible = (len(recent_items) > 0)
for i, menuitem in enumerate(self.menuitem_recent):
if i < len(recent_items):
it = recent_items[i]
fn = it.get_uri()[len('file://'):]
menuitem.props.visible = True
menuitem.child.props.label = "_%d %s" % (i, fn)
self.recent_filenames[i] = fn
else:
menuitem.props.visible = False
self.recent_filenames[i] = None
def on_menuitem_recent(self, widget):
num = self.menuitem_recent.index(widget)
fn = self.recent_filenames[num]
self.histpersist.load_filename(fn)
# Discard history
def discard_hist_before_tag(self, tag):
"""
Discard history before the given tag. If tag == COMMAND, this discards
all history, and if tag == MESSAGE, this discards previous sessions.
"""
tb = self.textbuffer
tag = tb.get_tag_table().lookup(tag)
it = tb.get_end_iter()
it.backward_to_tag_toggle(tag)
if not it.begins_tag(tag):
it.backward_to_tag_toggle(tag)
tb.delete(tb.get_start_iter(), it)
def on_discard_history(self, _widget):
xml = glade.XML(gladefile, 'discard_hist_dialog')
d = xml.get_widget('discard_hist_dialog')
d.set_transient_for(self.window_main)
d.set_default_response(gtk.RESPONSE_OK)
previous_rad = xml.get_widget('previous_rad')
all_rad = xml.get_widget('all_rad')
previous_rad.set_group(all_rad)
previous_rad.props.active = True
r = d.run()
d.destroy()
if r == gtk.RESPONSE_OK:
tb = self.textbuffer
if previous_rad.props.active:
self.discard_hist_before_tag(MESSAGE)
else:
self.discard_hist_before_tag(COMMAND)
tb.insert_with_tags_by_name(
tb.get_start_iter(),
'================= History Discarded =================\n',
MESSAGE)
self.status_bar.set_status(_('History discarded.'))
self.histpersist.forget_filename()
# Folding
def on_section_menu_activate(self, widget):
"""
        Called when the user clicks a section-related item in a popup menu.
"""
tb = self.textbuffer
it = tb.get_iter_at_mark(self.popup_mark)
r = self.folding.get_section_status(it)
if r is None:
# May happen if something was changed in the textbuffer between
# popup and activation
return
typ, is_folded, start_it = r
if widget is self.fold_unfold_section_menu:
# Fold/Unfold
if is_folded is None:
# No point in folding.
beep()
elif not is_folded:
self.folding.fold(typ, start_it)
else:
self.folding.unfold(typ, start_it)
else:
if typ == COMMAND:
text = self.history.iter_get_command(start_it)
else:
end_it = start_it.copy()
end_it.forward_to_tag_toggle(self.folding.get_tag(typ))
text = get_text(tb, start_it, end_it)
if sys.platform == 'win32':
text = text.replace('\n', '\r\n')
if widget is self.copy_section_menu:
# Copy
self.selection.clipboard.set_text(text)
elif widget is self.view_section_menu:
# View
fd, fn = tempfile.mkstemp()
os.write(fd, text)
os.close(fd)
viewer = eval(self.config.get('viewer'))
self.spawn_and_forget('%s %s' % (viewer, fn))
elif widget is self.save_section_menu:
# Save
def func(filename):
f = open(filename, 'wb')
f.write(text)
f.close()
save_dialog(func, _("Choose where to save the section"),
self.main_widget, _("All Files"), "*", None)
else:
assert False, "Unexpected widget"
def spawn_and_forget(self, argv):
"""
Start a process and forget about it.
"""
if sys.platform == 'linux2':
# We use a trick so as not to create zombie processes: we fork,
# and let the fork spawn the process (actually another fork). The
            # (first) fork immediately exits, so the process we spawned is
# made the child of process number 1.
pid = os.fork()
if pid == 0:
_p = subprocess.Popen(argv, shell=True)
os._exit(0)
else:
os.waitpid(pid, 0)
else:
_p = subprocess.Popen(argv, shell=True)
def on_double_click(self, event):
"""If we are on a folded section, unfold it and return True, to
avoid event propagation."""
tv = self.textview
if tv.get_window(gtk.TEXT_WINDOW_TEXT) is not event.window:
# Probably a click on the border or something
return
x, y = tv.window_to_buffer_coords(gtk.TEXT_WINDOW_TEXT,
int(event.x), int(event.y))
it = tv.get_iter_at_location(x, y)
r = self.folding.get_section_status(it)
if r is not None:
typ, is_folded, start_it = r
if is_folded:
self.folding.unfold(typ, start_it)
return True
def on_fold_last(self, _widget):
self.folding.fold_last()
def on_unfold_last(self, _widget):
self.folding.unfold_last()
def on_clear(self,_widget):
window_height = self.textview.get_visible_rect().height
_,char_height = self.get_char_width_height()
num_of_lines = int(window_height/char_height)+2
self.output.write("\n"*num_of_lines,tag_names="stdout")
# Notebook tabs
def on_notebook_switch_page(self, _widget, _page, page_num):
new_sv = self.notebook.get_nth_page(page_num).get_child()
for cb in self.sv_changed:
cb(new_sv)
def new_tab(self, index=None):
# The following line should result in on_notebook_switch_page, which
# will take care of calling on_sv_change functions.
self.create_sourcebufferview(index)
self.notebook.props.show_tabs = True
self.reopen_tab_data = None
self.menuitem_reopen_tab.props.sensitive = False
def on_new_tab(self, _widget):
self.new_tab()
def on_reopen_tab(self, _widget):
index, text = self.reopen_tab_data
self.new_tab(index)
self.sourcebuffer.set_text(text)
def on_close_tab(self, _widget):
if self.notebook.get_n_pages() == 1:
beep()
return
else:
self.close_current_tab()
def close_current_tab(self):
assert self.notebook.get_n_pages() > 1
cur_page = self.notebook.get_current_page()
text = get_text(self.sourcebuffer, self.sourcebuffer.get_start_iter(),
self.sourcebuffer.get_end_iter())
if text:
self.reopen_tab_data = (cur_page, text)
self.menuitem_reopen_tab.props.sensitive = True
else:
self.reopen_tab_data = None
self.menuitem_reopen_tab.props.sensitive = False
scrolledwin = self.notebook.get_nth_page(cur_page)
new_page = cur_page-1 if cur_page > 0 else 1
# This should result in on_notebook_switch_page which will set
# everything to use the new sourcebuffer
self.notebook.set_current_page(new_page)
assert self.sourceview is not scrolledwin.get_child()
self.notebook.remove_page(cur_page)
if self.notebook.get_n_pages() == 1:
self.notebook.props.show_tabs = False
if True:
scrolledwin.destroy()
else:
# Verify that the sourceview and sourcebuffer are indeed destroyed,
# and not referenced anywhere
import weakref, gc
r = weakref.ref(scrolledwin.get_child().get_buffer())
scrolledwin.destroy()
gc.collect()
assert r() is None
def on_prev_tab(self, _widget):
self.notebook.prev_page()
def on_next_tab(self, _widget):
self.notebook.next_page()
# Other events
def on_show_completions(self, _widget):
self.autocomplete.show_completions(is_auto=False, complete=False)
def complete_dict_keys(self, expr):
return self.call_subp_catch(u'complete_dict_keys', expr)
def complete_attributes(self, expr):
return self.call_subp_catch(u'complete_attributes', expr)
def complete_firstlevels(self):
return self.call_subp_catch(u'complete_firstlevels')
def get_func_args(self, expr):
return self.call_subp_catch(u'get_func_args', expr)
def find_modules(self, expr):
return self.call_subp_catch(u'find_modules', expr)
def get_module_members(self, expr):
return self.call_subp_catch(u'get_module_members', expr)
def complete_filenames(self, str_prefix, text, str_char, add_quote):
return self.call_subp_catch(u'complete_filenames', str_prefix, text, str_char,
add_quote)
def on_show_calltip(self, _widget):
self.call_tips.show(is_auto=False)
def get_func_doc(self, expr):
return self.call_subp_catch(u'get_func_doc', expr)
def configure(self):
"""
Apply configuration. Called on initialization and after configuration
was changed by the configuration dialog.
"""
config = self.config
tv = self.textview; tb = self.textbuffer
sourceviews = [self.notebook.get_nth_page(i).get_child()
for i in range(self.notebook.get_n_pages())]
font_name = config.get('font')
font = pango.FontDescription(font_name)
tv.modify_font(font)
for sv in sourceviews:
sv.modify_font(font)
theme = tags.get_theme(self.config, self.config.get('current-theme'))
tags.apply_theme_text(tv, tb, theme)
for sv in sourceviews:
tags.apply_theme_source(sv.get_buffer(), theme)
vertical_layout = self.config.get_bool('vertical-layout')
if vertical_layout:
pane = self.vpaned_main; other_pane = self.hpaned_main
self.notebook.props.tab_pos = gtk.POS_BOTTOM
else:
pane = self.hpaned_main; other_pane = self.vpaned_main
self.notebook.props.tab_pos = gtk.POS_TOP
pane.props.visible = True
other_pane.props.visible = False
if pane.get_child1() is None:
child1 = other_pane.get_child1(); other_pane.remove(child1)
child2 = other_pane.get_child2(); other_pane.remove(child2)
pane.pack1(child1, resize=True, shrink=False)
pane.pack2(child2, resize=not vertical_layout, shrink=False)
# If the fonts were changed, we might need to enlarge the window
last_font, last_vertical = self.last_configured_layout
if last_font != font or last_vertical != vertical_layout:
self.set_window_size(vertical_layout)
self.last_configured_layout = font, vertical_layout
command_defs = self.textbuffer.get_tag_table().lookup(COMMAND_DEFS)
command_defs.props.invisible = config.get_bool('hide-defs')
def configure_sourceview(self, sv):
"""
Apply configuration to a newly created sourceview.
This does the same for a single sourceview as configure() does for
all of them.
"""
font_name = self.config.get('font')
font = pango.FontDescription(font_name)
sv.modify_font(font)
theme = tags.get_theme(self.config, self.config.get('current-theme'))
tags.apply_theme_source(sv.get_buffer(), theme)
def on_preferences(self, _widget):
cd = ConfigDialog(self.config, gladefile, self.window_main)
r = cd.run()
if r == gtk.RESPONSE_OK:
self.configure()
self.configure_subp()
cd.destroy()
def on_clear_reshist(self, _widget):
try:
self.call_subp_noblock(u'clear_reshist')
except TimeoutError:
# Will happen anyway when idle job ends
pass
self.status_bar.set_status(_("Result history cleared."))
def on_close(self, _widget, _event):
self.quit()
return True
def on_quit(self, _widget):
self.quit()
def quit(self):
was_saved = self.histpersist.was_saved()
if (self.textbuffer.get_modified()
and (was_saved or self.config.get_bool('ask-on-quit'))):
d = gtk.MessageDialog(
parent=self.window_main,
flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
type=gtk.MESSAGE_WARNING,
message_format=_('Save history before closing?'))
d.props.secondary_text = _("If you don't save, your history will be lost.")
CANCEL, DISCARD, SAVE = range(3)
discard_btn = d.add_button(_("Close _without saving"), DISCARD)
_cancel_btn = d.add_button(_("_Cancel"), CANCEL)
save_btn = d.add_button(_("_Save"), SAVE)
if not was_saved:
dontask_chk = gtk.CheckButton(
_("Don't ask me again when the history was never saved"))
dontask_chk.show()
d.get_content_area().pack_start(dontask_chk, fill=True, expand=False)
d.set_default_response(DISCARD)
discard_btn.grab_focus()
else:
d.set_default_response(SAVE)
save_btn.grab_focus()
r = d.run()
if not was_saved and r == DISCARD and dontask_chk.props.active:
self.config.set_bool('ask-on-quit', False)
self.config.save()
if r == SAVE:
saved = self.histpersist.save()
quit = saved
elif r == DISCARD:
quit = True
else:
quit = False
d.destroy()
else:
quit = True
if quit:
self.is_terminating = True
self.window_main.destroy()
self.subp.kill()
gtk.main_quit()
def on_about(self, _widget):
d = get_widget('about_dialog')
d.set_transient_for(self.window_main)
d.set_version(__version__)
d.set_logo(gdk.pixbuf_new_from_file(
path.join(data_dir, 'dreampie.png')))
d.run()
d.destroy()
def on_update_available(self, is_git, latest_name, latest_time):
date = time.strftime('%Y/%m/%d', time.localtime(latest_time))
if is_git:
msg = _("A new git commit is available, from %s. "
"Run 'git pull' to update." % date)
else:
self.get_update_menu.show()
msg = _("A new DreamPie version, %s, is available. "
"Click Help->Get New Version to update." % latest_name)
self.status_bar.set_status(msg)
def on_get_update_menu_activate(self, _widget):
webbrowser.open('http://www.dreampie.org/download.html')
def on_report_bug(self, _widget):
bug_report.bug_report(self.window_main, gladefile, None)
def on_homepage(self, _widget):
webbrowser.open('http://www.dreampie.org/')
def on_getting_started(self, _widget):
self.show_getting_started_dialog()
def show_getting_started_dialog(self):
d = get_widget('getting_started_dialog')
d.set_transient_for(self.window_main)
d.run()
d.destroy()
def on_textview_button_press_event(self, _widget, event):
if event.button == 3:
self.show_popup_menu(event)
return True
elif event.button == 2:
return self.on_sourceview_button_press_event(_widget, event)
elif event.type == gdk._2BUTTON_PRESS:
return self.on_double_click(event)
def show_popup_menu(self, event):
tv = self.textview
tb = self.textbuffer
if tb.get_has_selection():
self.popup_sel_menu.popup(None, None, None, event.button,
event.get_time())
else:
if tv.get_window(gtk.TEXT_WINDOW_TEXT) is not event.window:
# Probably a click on the border or something
return
x, y = tv.window_to_buffer_coords(gtk.TEXT_WINDOW_TEXT,
int(event.x), int(event.y))
it = tv.get_iter_at_location(x, y)
r = self.folding.get_section_status(it)
if r is not None:
typ, is_folded, _start_it = r
if typ == OUTPUT:
typ_s = _('Output Section')
else:
typ_s = _('Code Section')
self.fold_unfold_section_menu.props.visible = (
is_folded is not None)
self.fold_unfold_section_menu.child.props.label = (
_('Unfold %s') if is_folded else _('Fold %s')) % typ_s
self.copy_section_menu.child.props.label = _('Copy %s') % typ_s
self.view_section_menu.child.props.label = _('View %s') % typ_s
self.save_section_menu.child.props.label = _('Save %s') % typ_s
self.view_section_menu.props.visible = \
bool(eval(self.config.get('viewer')))
tb.move_mark(self.popup_mark, it)
self.popup_nosel_menu.popup(None, None, None, event.button,
event.get_time())
else:
beep()
def main():
usage = "%prog [options] [python-executable]"
version = 'DreamPie %s' % __version__
parser = OptionParser(usage=usage, version=version)
parser.add_option("--run", dest="runfile",
help="A file to run upon initialization. It will be "
"run only once.")
if sys.platform == 'win32':
parser.add_option("--hide-console-window", action="store_true",
dest="hide_console",
help="Hide the console window")
opts, args = parser.parse_args()
if len(args) > 1:
parser.error("Can accept at most one argument")
if len(args) == 1:
pyexec = args[0]
elif 'dreampie' in sys.executable.lower():
# We are under py2exe.
msg = gtk.MessageDialog(
None, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE,
_("DreamPie must be given the file name of a Python interpreter. "
"Please create a shortcut to something like '%s "
"--hide-console-window c:\\python26\\python.exe'.")
% os.path.abspath(sys.argv[0]))
_response = msg.run()
msg.destroy()
sys.exit(1)
else:
pyexec = sys.executable
if sys.platform == 'win32' and opts.hide_console:
from .hide_console_window import hide_console_window
hide_console_window()
gtk.widget_set_default_direction(gtk.TEXT_DIR_LTR)
_dp = DreamPie(pyexec, opts.runfile)
gtk.main()
|
gpl-3.0
|
LamaHamadeh/Microsoft-DAT210x
|
Module-2/assignment5.py
|
1
|
2233
|
'''
author Lama Hamadeh
'''
import pandas as pd
import numpy as np
#
# TODO:
# Load up the dataset, setting correct header labels.
#
# .. your code here ..
Census_DataFrame = pd.read_csv('/Users/Admin/Desktop/DAT210x/DAT210x-master/Module2/Datasets/census.data',na_values=["?"])
Census_DataFrame = Census_DataFrame.drop(labels=['0'], axis=1)
Census_DataFrame.columns=['education', 'age', 'capital-gain', 'race', 'capital-loss', 'hours-per-week', 'sex', 'classification']
Census_DataFrame = Census_DataFrame.fillna(0)
#print(Census_DataFrame.dtypes)
#ordered_age = ['20', '25', '30', '35','40', '45', '50', '55', '60']
#Census_DataFrame.age = Census_DataFrame.age.astype("category", ordered=True, categories=ordered_age).cat.codes
ordered_education = ['5th', '6th', '7th', '8th','7th-8th', '9th', '10th', '11th', '12th', 'HS-grad', 'Some-college' , 'Bachelors','Masters','Doctorate' ]
Census_DataFrame.education = Census_DataFrame.education.astype("category", ordered=True, categories=ordered_education).cat.codes
print(Census_DataFrame)
#
# TODO:
# Use basic pandas commands to look through the dataset... get a
# feel for it before proceeding! Do the data-types of each column
# reflect the values you see when you look through the data using
# a text editor / spread sheet program? If you see 'object' where
# you expect to see 'int32' / 'float64', that is a good indicator
# that there is probably a string or missing value in a column.
# use `your_data_frame['your_column'].unique()` to see the unique
# values of each column and identify the rogue values. If these
# should be represented as nans, you can convert them using
# na_values when loading the dataframe.
#
# .. your code here ..
#
# TODO:
# Look through your data and identify any potential categorical
# features. Ensure you properly encode any ordinal and nominal
# types using the methods discussed in the chapter.
#
# Be careful! Some features can be represented as either categorical
# or continuous (numerical). Think to yourself, does it generally
# make more sense to have a numeric type or a series of categories
# for these somewhat ambigious features?
#
# .. your code here ..
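#
# Editorial sketch (not the official assignment solution): nominal columns
# such as 'race' and 'sex' have no natural order, so one-hot encoding via
# pd.get_dummies is a common choice here. 'Census_Encoded' is a hypothetical
# name introduced only for illustration; the original frame is left untouched.
Census_Encoded = pd.get_dummies(Census_DataFrame, columns=['race', 'sex'])
#print(Census_Encoded.dtypes)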
#
# TODO:
# Print out your dataframe
#
# .. your code here ..
|
mit
|
liyu1990/sklearn
|
sklearn/externals/joblib/__init__.py
|
23
|
4764
|
""" Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
over, for instance when prototyping computational-heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
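# Editorial usage sketch for the persistence helpers imported above
# (illustrative only; '/tmp/arr.pkl' is a hypothetical path, and the calls
# are shown as comments so importing this package stays side-effect free):
#
#     >>> import numpy as np
#     >>> from sklearn.externals.joblib import dump, load
#     >>> _ = dump(np.arange(5), '/tmp/arr.pkl')  # doctest: +SKIP
#     >>> load('/tmp/arr.pkl')                    # doctest: +SKIP
#     array([0, 1, 2, 3, 4])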
|
bsd-3-clause
|
pastas/pasta
|
tests/test_qgxg.py
|
1
|
2703
|
# -*- coding: utf-8 -*-
"""
Author: T. van Steijn, R.A. Collenteur, 2017
"""
import numpy as np
import pandas as pd
import pastas as ps
class TestQGXG(object):
def test_q_ghg(self):
n = 101
idx = pd.date_range('20160101', freq='d', periods=n)
s = pd.Series(np.arange(n), index=idx)
v = ps.stats.q_ghg(s, q=.94)
assert v == 94.
def test_q_glg(self):
n = 101
idx = pd.date_range('20160101', freq='d', periods=n)
s = pd.Series(np.arange(n), index=idx)
v = ps.stats.q_glg(s, q=.06)
assert v == 6.
def test_q_ghg_nan(self):
idx = pd.date_range('20160101', freq='d', periods=4)
s = pd.Series([1, np.nan, 3, np.nan], index=idx)
v = ps.stats.q_ghg(s, q=.5)
assert v == 2.
def test_q_gvg(self):
idx = pd.to_datetime(['20160320', '20160401', '20160420'])
s = pd.Series([0, 5, 10], index=idx)
v = ps.stats.q_gvg(s)
assert v == 2.5
def test_q_gvg_nan(self):
idx = pd.to_datetime(['20160820', '20160901', '20161120'])
s = pd.Series([0, 5, 10], index=idx)
v = ps.stats.q_gvg(s)
assert np.isnan(v)
def test_q_glg_tmin(self):
tmin = '20160301'
idx = pd.date_range('20160101', '20160331', freq='d')
s = pd.Series(np.arange(len(idx)), index=idx)
v = ps.stats.q_glg(s, q=.06, tmin=tmin)
assert v == 61.8
def test_q_ghg_tmax(self):
n = 101
tmax = '20160301'
idx = pd.date_range('20160101', freq='d', periods=n)
s = pd.Series(np.arange(n), index=idx)
v = ps.stats.q_ghg(s, q=.94, tmax=tmax)
assert v == 56.4
def test_q_gvg_tmin_tmax(self):
tmin = '20170301'
tmax = '20170401'
idx = pd.to_datetime(['20160401', '20170401', '20180401'])
s = pd.Series([0, 5, 10], index=idx)
v = ps.stats.q_gvg(s, tmin=tmin, tmax=tmax)
assert v == 5
def test_q_gxg_series(self):
s = pd.read_csv('tests/data/hseries_gxg.csv', index_col=0, header=0,
parse_dates=True, dayfirst=True, squeeze=True)
ghg = ps.stats.q_ghg(s)
glg = ps.stats.q_glg(s)
gvg = ps.stats.q_gvg(s)
print('\n')
print('calculated GXG\'s percentile method: \n')
print(('GHG: {ghg:.2f} m+NAP\n'
'GLG: {glg:.2f} m+NAP\n'
'GVG: {gvg:.2f} m+NAP\n').format(
ghg=ghg, glg=glg, gvg=gvg))
print('Menyanthes GXG\'s: \n')
print(('GHG: {ghg:.2f} m+NAP\n'
'GLG: {glg:.2f} m+NAP\n'
'GVG: {gvg:.2f} m+NAP\n').format(
ghg=-3.23, glg=-3.82, gvg=-3.43))
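# Editorial note (sketch, not part of the original suite): for the plain
# arange-based series used above, the expected GHG/GLG values coincide with
# pandas' linear-interpolation quantiles on the raw series, e.g.
#
#     >>> pd.Series(np.arange(101)).quantile(.94)
#     94.0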
|
mit
|
Mehotkhan/persian-twitter-day
|
tw/words_cloud.py
|
1
|
1417
|
#!/usr/bin/env python
"""
Minimal Example
===============
Generating a square wordcloud from the US constitution using default arguments.
"""
import numpy as np
import string
from PIL import Image
from os import path
from wordcloud import WordCloud
import arabic_reshaper
from bidi.algorithm import get_display
# from wordcloud import WordCloud, STOPWORDS
d = path.dirname(__file__)
mask = np.array(Image.open(path.join(d, "stormtrooper_mask.png")))
font_path = path.join(d, 'management/commands/fonts', 'Vazir-Light.ttf')
# Read the whole text.
text = open(path.join(d, 'persian.txt'), encoding='utf-8').read()
text_ = arabic_reshaper.reshape(text)
bidi_text = get_display(text_)
# Generate a word cloud image
STOPWORDS = set([get_display(arabic_reshaper.reshape(x.strip())) for x in
open((path.join(d, 'management/commands/stop_words.txt')), encoding='utf-8').read().split('\n')])
# STOPWORDS = arabic_reshaper.reshape(STOPWORDS)
# bidi_text_stop = get_display(STOPWORDS)
stopwords = set(STOPWORDS)
# print(stopwords)
# exit()
wordcloud = WordCloud(
font_path=font_path,
max_words=5000000,
stopwords=stopwords,
# mask=mask,
margin=0,
width=800,
height=800,
min_font_size=1,
max_font_size=500,
background_color="white"
# random_state=1
).generate(bidi_text)
# The pil way (if you don't have matplotlib)
image = wordcloud.to_image()
image.show()
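# Alternatively, display with matplotlib (sketch mirroring the wordcloud
# documentation example; matplotlib is not imported by this script):
# import matplotlib.pyplot as plt
# plt.imshow(wordcloud, interpolation='bilinear')
# plt.axis('off')
# plt.show()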
|
gpl-2.0
|
Garrett-R/scikit-learn
|
examples/svm/plot_svm_kernels.py
|
329
|
1971
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
|
bsd-3-clause
|
br4nd/nnreplay
|
tests/triangleScore_test.py
|
1
|
2458
|
#!/usr/local/bin/python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pdb
from pprint import pprint as pp
def triangleScore(P0,P1,P2) :
x0 = P0[0]
y0 = P0[1]
z0 = P0[2]
x1 = P1[0]
y1 = P1[1]
z1 = P1[2]
x2 = P2[0]
y2 = P2[1]
z2 = P2[2]
# Find the lengths of the edges
len_a = np.sqrt( (x1-x2)**2 + (y1-y2)**2 + (z1-z2)**2 )
len_b = np.sqrt( (x0-x2)**2 + (y0-y2)**2 + (z0-z2)**2 )
len_c = np.sqrt( (x0-x1)**2 + (y0-y1)**2 + (z0-z1)**2 )
altitudeA = np.sqrt((len_b*len_c/(len_b+len_c)**2) *((len_b+len_c)**2-len_a**2));
altitudeB = np.sqrt((len_a*len_c/(len_a+len_c)**2) *((len_a+len_c)**2-len_b**2));
altitudeC = np.sqrt((len_a*len_b/(len_a+len_b)**2) *((len_a+len_b)**2-len_c**2));
# High minimum height is best
score = min([altitudeA, altitudeB, altitudeC])
# pdb.set_trace()
return score
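# Editorial cross-check (hypothetical helper, not called by the test below):
# each altitude equals 2*Area / opposite_edge_length, so the minimum altitude
# can also be obtained from Heron's formula.
def _triangleScore_heron(P0, P1, P2):
    P0, P1, P2 = (np.asarray(P, dtype=float) for P in (P0, P1, P2))
    len_a = np.linalg.norm(P1 - P2)
    len_b = np.linalg.norm(P0 - P2)
    len_c = np.linalg.norm(P0 - P1)
    s = 0.5*(len_a + len_b + len_c)  # semi-perimeter
    area = np.sqrt(s*(s - len_a)*(s - len_b)*(s - len_c))  # Heron's formula
    return min(2.*area/edge for edge in (len_a, len_b, len_c))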
if __name__ == "__main__" :
# Start with some example triangle edge lengths
a = 10.
b = 10.
c = 10.
# Create the test coordinates
xA = 0.
yA = 0.
zA = 0.
xB = c
yB = 0.
zB = 0.
angA = np.arccos((-a**2 + b**2 + c**2)/(2*b*c))
angA_deg = angA*180./np.pi
#angB = acos((-bh^2 + ah^2 + ch^2)/(2*ah*ch)); angleB_deg = angB*180/pi
#angC = acos((-ch^2 + ah^2 + bh^2)/(2*ah*bh)); angleC_deg = angC*180/pi
xC = b*np.cos(angA)
yC = b*np.sin(angA)
zC = 0.
P0 = [xA,yA,zA]
P1 = [xB,yB,zB]
P2 = [xC,yC,zC]
P0 = np.array([17862, 199, 2589])/1000.
P1 = np.array([13679, 10648, 2544])/1000.
P2 = np.array([17831, 10575, 2557])/1000.
# Gut check
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot([P0[0],P1[0],P2[0],P0[0]], [P0[1],P1[1],P2[1],P0[1]],[P0[2],P1[2],P2[2],P0[2]], '.-');
    ax.text(P0[0],P0[1],P0[2],'A')
    ax.text(P1[0],P1[1],P1[2],'B')
    ax.text(P2[0],P2[1],P2[2],'C')
# ax.axis('equal')
# ax.axis([-5 15 15
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.draw()
# Anchor Array
Xvec = [xA, xB, xC]
Yvec = [yA, yB, yC]
AA = [Xvec,Yvec]
# find score
score = triangleScore(P0,P1,P2)
print 'X = [%7.3f, %7.3f, %7.3f]' % (Xvec[0],Xvec[1],Xvec[2])
print 'Y = [%7.3f, %7.3f, %7.3f]' % (Yvec[0],Yvec[1],Yvec[2])
print 'score = %f\n' % score
pdb.set_trace()
|
mit
|
mattmeehan/decotools
|
decotools/blob_extraction.py
|
1
|
13260
|
from __future__ import division
import numpy as np
import pandas as pd
from skimage import io, measure
from PIL import Image
from collections import Counter, Iterable
def get_image_array(image_file, rgb_sum=False):
'''Returns an image array
Parameters
----------
image_file : str
Path to image file.
rgb_sum : bool, optional
        Whether to use a simple RGB sum to convert the image to grayscale, or
to use a weighted RGB sum (default: False).
Returns
-------
numpy.ndarray
Grayscale image array
'''
if rgb_sum:
image = io.imread(image_file)
# Convert RGB image to R+G+B grayscale
image = np.sum(image[:, :, :-1], axis=2)
else:
img = Image.open(image_file)
# Convert grayscale using a weighted RGB sum
# From PIL documentation the weighted sum is given by
# grayscale = R * 299/1000 + G * 587/1000 + B * 114/1000
img = img.convert('L')
# Convert img to a numpy array
image = np.asarray(img, dtype=float).T
return image
class Blob(object):
'''Class that defines a 'blob' in an image: the contour of a set of pixels
with values above a given threshold.
'''
def __init__(self, x, y):
        '''Define a blob by its contour line (a list of points in the xy
plane), the contour centroid, and its enclosed area.
'''
self.x = np.array(x)
self.y = np.array(y)
self.xc = np.mean(x)
self.yc = np.mean(y)
self._length = x.shape[0]
# Find the area inside the contour
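        # (trapezoid/shoelace rule over the closed polygon; when i == 0,
        # x[i-1] and y[i-1] wrap around to the last contour point)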
area = 0
for i in range(self._length):
area += 0.5*(y[i]+y[i-1])*(x[i]-x[i-1])
self.area = area
def __repr__(self):
str_rep = 'Blob(xc={}, yc={}, area={})'.format(self.xc, self.yc,
self.area)
return str_rep
def length(self):
''' Find the approx length of the blob from the max points of the
contour. '''
xMin = self.x.min()
xMax = self.x.max()
yMin = self.y.min()
yMax = self.y.max()
len_ = np.sqrt((xMin - xMax)**2 + (yMin - yMax)**2)
return len_
def distance(self, blob):
'''Calculate the distance between the centroid of this blob contour and
another one in the xy plane.'''
return np.sqrt((self.xc - blob.xc)**2 + (self.yc-blob.yc)**2)
def findblobs(image, threshold, min_area=2., max_area=1000.):
'''Pass through an image and find a set of blobs/contours above a set
threshold value. The min_area parameter is used to exclude blobs with
an area below this value.'''
blobs = []
nx, ny = image.shape
# Find contours using the Marching Squares algorithm in the scikit package.
contours = measure.find_contours(image, threshold)
for contour in contours:
y = contour[:, 1]
x = contour[:, 0]
blob = Blob(x, y)
if blob.area >= min_area and blob.area <= max_area:
blobs.append(blob)
return blobs
class BlobGroup(object):
'''A list of blobs that is grouped or associated in some way, i.e., if
their contour centroids are relatively close together.'''
def __init__(self, image):
'''Initialize a list of stored blobs and the bounding rectangle which
defines the group.'''
self.blobs = []
self.xmin = 1e10
self.xmax = -1e10
self.ymin = 1e10
self.ymax = -1e10
self.image = image
self.xc = None
self.yc = None
def __repr__(self):
str_rep = 'BlobGroup(n_blobs={}, xc={}, yc={})'.format(
len(self.blobs), self.xc, self.yc)
return str_rep
def add_blob(self, blob):
'''Add a blob to the group and enlarge the bounding rectangle of the
group.'''
self.blobs.append(blob)
self.xmin = min(self.xmin, blob.x.min())
self.xmax = max(self.xmax, blob.x.max())
self.ymin = min(self.ymin, blob.y.min())
self.ymax = max(self.ymax, blob.y.max())
self.xc = np.mean([b.xc for b in self.blobs])
self.yc = np.mean([b.yc for b in self.blobs])
def get_bounding_box(self):
'''Get the bounding rectangle of the group.'''
return (self.xmin, self.xmax, self.ymin, self.ymax)
def get_square_bounding_box(self):
'''Get the bounding rectangle, redefined to give it a square aspect
ratio.'''
xmin, xmax, ymin, ymax = (self.xmin, self.xmax, self.ymin, self.ymax)
xL = np.abs(xmax - xmin)
yL = np.abs(ymax - ymin)
if xL > yL:
ymin -= 0.5*(xL-yL)
ymax += 0.5*(xL-yL)
else:
xmin -= 0.5*(yL-xL)
xmax += 0.5*(yL-xL)
return (xmin, xmax, ymin, ymax)
def get_sub_image(self, image=None, size=None):
'''Given an image, extract the section of the image corresponding to
the bounding box of the blob group.'''
if image is None:
image = self.image.copy()
nx, ny = image.shape
if size is None:
x0, x1, y0, y1 = self.get_square_bounding_box()
else:
xc, yc = self.xc, self.yc
if isinstance(size, Iterable):
size_x, size_y = size
else:
size_x = size_y = size
x0, x1 = xc - size_x, xc + size_x
y0, y1 = yc - size_y, yc + size_y
# Account for all the weird row/column magic in the image table...
i0, i1 = int(x0), int(x1)
j0, j1 = int(y0), int(y1)
# Add a pixel buffer around the bounds, and check the ranges
buf = 1
i0 = 0 if i0-buf < 0 else i0-buf
i1 = nx-1 if i1 > nx-1 else i1+buf
j0 = 0 if j0-buf < 0 else j0-buf
j1 = ny-1 if j1 > ny-1 else j1+buf
return image[i0:i1, j0:j1]
def get_region_props(self, threshold, size=None):
subimage = self.get_sub_image(size=size)
labeled_image = subimage >= threshold
region_properties = measure.regionprops(labeled_image.astype(int),
subimage)
if len(region_properties) == 0:
return {}
elif len(region_properties) > 1:
raise ValueError('Image has more than one region!')
return region_properties[0]
def group_blobs(image, blobs, max_dist):
'''Given a list of blobs, group them by distance between the centroids of
any two blobs. If the centroids are more distant than max_dist, create
a new blob group.'''
n = len(blobs)
groups = []
if n >= 1:
# Single-pass clustering algorithm: make the first blob the nucleus of
        # a blob group. Then loop through each blob and either add it to
# this group (depending on the distance measure) or make it the
# nucleus of a new blob group
bg = BlobGroup(image=image)
bg.add_blob(blobs[0])
groups.append(bg)
for i in range(1, n):
bi = blobs[i]
is_grouped = False
for group in groups:
# Calculate distance measure for a blob and a blob group:
# blob has to be < max_dist from any other blob in the group
for bj in group.blobs:
if bi.distance(bj) < max_dist:
group.add_blob(bi)
is_grouped = True
break
if not is_grouped:
bg = BlobGroup(image=image)
bg.add_blob(bi)
groups.append(bg)
return np.asarray(groups, dtype=object)
def extract_blobs(image_file, threshold=20., rgb_sum=False, min_area=10.,
max_area=1000., max_dist=5., group_max_area=None, size=None):
'''Function to perform blob detection on an input image
Blobs are found using the marching squares algorithm implemented in
scikit-image.
Parameters
----------
image_file : str
Path to image file.
threshold : float, optional
Threshold for blob detection. Only pixels with an intensity above
this threshold will be used in blob detection (default: 20).
rgb_sum : bool, optional
        Whether to use a simple RGB sum to convert the image to grayscale, or
to use a weighted RGB sum (default: False).
min_area : float, optional
Minimum area for a blob to be kept. This helps get rid of noise in
an image (default: 10).
max_area : float, optional
Maximum area for a blob to be kept. This helps get rid of pathological
events in an image (default: 1000).
max_dist : float, optional
Distance scale for grouping close by blobs. If two blobs are separated
by less than max_dist, they are grouped together as a single blob
        (default: 5).
group_max_area : float, optional
Maximum area for a blob group to be kept. This helps get rid of
pathological events in an image (default: None).
size : {None, int, array-like of shape=(2,)}, optional
Size of zoomed image of extracted blobs. If an integer is provided, the
zoomed image will be a square box of size 2*size in each dimension. If
an array-like object (of shape=(2,)) is provided, then the zoomed image
will be of size 2*size[0] by 2*size[1]. Otherwise, the default behavior
is to return a square image of size twice the equivalent diameter of
the blob.
Returns
-------
pandas.DataFrame
A DataFrame containing information about the found blobs is returned.
Each row in the DataFrame corresponds to a blob group, while each
        column corresponds to a pertinent quantity (area, eccentricity,
zoomed image array, etc.).
'''
image = get_image_array(image_file, rgb_sum=rgb_sum)
if image.ndim != 2:
return pd.DataFrame()
# Calculate contours using the scikit-image marching squares algorithm,
# store as Blobs, and group the Blobs into associated clusters
blobs = findblobs(image, threshold=threshold,
min_area=min_area, max_area=max_area)
groups = group_blobs(image, blobs, max_dist=max_dist)
group_properties = []
for group_idx, group in enumerate(groups):
region_props = group.get_region_props(threshold, size=size)
prop_dict = {property_: region_props[property_]
for property_ in region_props}
prop_dict['n_blobs'] = len(group.blobs)
prop_dict['n_groups'] = len(groups)
prop_dict['blob_idx'] = group_idx
prop_dict['xc'] = group.xc
prop_dict['yc'] = group.yc
if size is None:
size = prop_dict['equivalent_diameter']
prop_dict['image'] = group.get_sub_image(size=size)
prop_dict['image_file'] = image_file
if group_max_area and prop_dict['area'] > group_max_area:
continue
group_properties.append(prop_dict)
region_prop_df = pd.DataFrame.from_records(group_properties)
return region_prop_df
def is_hotspot(x_coords, y_coords, threshold=3, radius=4.0):
'''Function to identify hot spot from a list of x-y coordinates
Parameters
----------
x_coords : array-like
X-coordinates of blob groups. Note: x_coords and y_coords must have
the same shape.
y_coords : array-like
Y-coordinates of blob groups. Note: y_coords and x_coords must have
the same shape.
threshold : int, optional
Threshold number of counts to classify an x-y coordinate pair as a hot
spot. If a (x, y) coordinate pair occurs threshold or more times,
it is considered a hot spot (default is 3).
radius : float, optional
If an x-y pair is within radius number of pixels of a hot spot, it
is also considered a hot spot.
Returns
-------
is_hot_spot : numpy.ndarray
Boolean array that specifies whether or not a blob group is a hot spot.
'''
# Cast to numpy arrays for vectorizing distance computation later on
x_coords = np.asarray(x_coords)
y_coords = np.asarray(y_coords)
    # Check that x_coords and y_coords are compatible
if not x_coords.shape == y_coords.shape:
raise ValueError('x_coords and y_coords must have the same shape.')
# Get number of times each x-y pixel combination occurs
centers_list = [(int(x), int(y)) for x, y in zip(x_coords, y_coords)]
coord_counter = Counter(centers_list)
# Get hot spot coordinates based on number of times x-y coordinates repeat
hotspots_coords = []
for coord, count in coord_counter.items():
if count >= threshold:
hotspots_coords.append(coord)
def get_distances(x1, y1, x2, y2):
return np.sqrt((x1-x2)**2 + (y1-y2)**2)
# Get mask for events within radius of hot spot
is_hot_spot = np.zeros(len(x_coords), dtype=bool)
for x_hot, y_hot in hotspots_coords:
distances = get_distances(x_coords, y_coords, x_hot, y_hot)
is_hot_spot = np.logical_or(is_hot_spot, (distances <= radius))
return is_hot_spot
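if __name__ == '__main__':
    # Minimal usage sketch (editorial addition): 'example.jpg' is a
    # hypothetical image path; any RGB/RGBA image file would do.
    blob_df = extract_blobs('example.jpg', threshold=20., min_area=10.,
                            max_area=1000., max_dist=5.)
    if blob_df.empty:
        print('No blob groups found.')
    else:
        print(blob_df[['blob_idx', 'n_blobs', 'xc', 'yc']].head())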
|
mit
|
tmhm/scikit-learn
|
sklearn/linear_model/tests/test_sparse_coordinate_descent.py
|
244
|
9986
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
|
bsd-3-clause
|
hdmetor/scikit-learn
|
examples/bicluster/plot_spectral_biclustering.py
|
403
|
2011
|
"""
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
|
bsd-3-clause
|
pschella/scipy
|
scipy/special/basic.py
|
7
|
70921
|
#
# Author: Travis Oliphant, 2002
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
import math
from scipy._lib.six import xrange
from numpy import (pi, asarray, floor, isscalar, iscomplex, real,
imag, sqrt, where, mgrid, sin, place, issubdtype,
extract, less, inexact, nan, zeros, sinc)
from . import _ufuncs as ufuncs
from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma,
psi, _zeta, hankel1, hankel2, yv, kv, _gammaln,
ndtri, poch, binom, hyp0f1)
from . import specfun
from . import orthogonal
from ._comb import _comb_int
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk',
'erf_zeros', 'erfcinv', 'erfinv', 'euler', 'factorial',
'factorialk', 'factorial2', 'fresnel_zeros',
'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',
'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',
'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
'sinc', 'sph_in', 'sph_inkn',
'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta']
def diric(x, n):
"""Periodic sinc function, also called the Dirichlet function.
The Dirichlet function is defined as::
diric(x) = sin(x * n/2) / (n * sin(x / 2)),
where `n` is a positive integer.
Parameters
----------
x : array_like
Input data
n : int
Integer defining the periodicity.
Returns
-------
diric : ndarray
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)
>>> plt.figure(figsize=(8, 8));
>>> for idx, n in enumerate([2, 3, 4, 9]):
... plt.subplot(2, 2, idx+1)
... plt.plot(x, special.diric(x, n))
... plt.title('diric, n={}'.format(n))
>>> plt.show()
The following example demonstrates that `diric` gives the magnitudes
(modulo the sign and scaling) of the Fourier coefficients of a
rectangular pulse.
Suppress output of values that are effectively 0:
>>> np.set_printoptions(suppress=True)
Create a signal `x` of length `m` with `k` ones:
>>> m = 8
>>> k = 3
>>> x = np.zeros(m)
>>> x[:k] = 1
Use the FFT to compute the Fourier transform of `x`, and
inspect the magnitudes of the coefficients:
>>> np.abs(np.fft.fft(x))
array([ 3. , 2.41421356, 1. , 0.41421356, 1. ,
0.41421356, 1. , 2.41421356])
Now find the same values (up to sign) using `diric`. We multiply
by `k` to account for the different scaling conventions of
`numpy.fft.fft` and `diric`:
>>> theta = np.linspace(0, 2*np.pi, m, endpoint=False)
>>> k * special.diric(theta, k)
array([ 3. , 2.41421356, 1. , -0.41421356, -1. ,
-0.41421356, 1. , 2.41421356])
"""
x, n = asarray(x), asarray(n)
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape, ytype)
# empirical minval for 32, 64 or 128 bit float computations
# where sin(x/2) < minval, result is fixed at +1 or -1
if np.finfo(ytype).eps < 1e-18:
minval = 1e-11
elif np.finfo(ytype).eps < 1e-15:
minval = 1e-7
else:
minval = 1e-3
mask1 = (n <= 0) | (n != floor(n))
place(y, mask1, nan)
x = x / 2
denom = sin(x)
mask2 = (1-mask1) & (abs(denom) < minval)
xsub = extract(mask2, x)
nsub = extract(mask2, n)
zsub = xsub / pi
place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask, x)
nsub = extract(mask, n)
dsub = extract(mask, denom)
place(y, mask, sin(nsub*xsub)/(nsub*dsub))
return y
def gammaln(x):
"""
Logarithm of the absolute value of the Gamma function for real inputs.
Parameters
----------
x : array-like
Values on the real line at which to compute ``gammaln``
Returns
-------
gammaln : ndarray
Values of ``gammaln`` at x.
See Also
--------
gammasgn : sign of the gamma function
loggamma : principal branch of the logarithm of the gamma function
Notes
-----
When used in conjunction with `gammasgn`, this function is useful
for working in logspace on the real axis without having to deal with
complex numbers, via the relation ``exp(gammaln(x)) = gammasgn(x)*gamma(x)``.
Note that `gammaln` currently accepts complex-valued inputs, but it is not
the same function as for real-valued inputs, and the branch is not
well-defined --- using `gammaln` with complex is deprecated and will be
disallowed in future Scipy versions.
For complex-valued log-gamma, use `loggamma` instead of `gammaln`.
"""
if np.iscomplexobj(x):
warnings.warn(("Use of gammaln for complex arguments is "
"deprecated as of scipy 0.18.0. Use "
"scipy.special.loggamma instead."),
DeprecationWarning)
return _gammaln(x)
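# Editorial sketch of the gammasgn/gammaln relation documented above
# (illustrative only, shown as comments; not part of the original module):
#
#     >>> from scipy.special import gamma, gammaln, gammasgn
#     >>> x = -2.5
#     >>> bool(np.isclose(gammasgn(x) * np.exp(gammaln(x)), gamma(x)))
#     True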
def jnjnp_zeros(nt):
"""Compute zeros of integer-order Bessel functions Jn and Jn'.
Results are arranged in order of the magnitudes of the zeros.
Parameters
----------
nt : int
Number (<=1200) of zeros to compute
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n, m, t, zo = specfun.jdzo(nt)
return zo[1:nt+1], n[:nt], m[:nt], t[:nt]
def jnyn_zeros(n, nt):
"""Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
Returns 4 arrays of length `nt`, corresponding to the first `nt` zeros of
Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively.
Parameters
----------
n : int
Order of the Bessel functions
nt : int
Number (<=1200) of zeros to compute
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n) != n) or (floor(nt) != nt):
raise ValueError("Arguments must be integers.")
if (nt <= 0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n), nt)
def jn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Jn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[0]
def jnp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Jn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[1]
def yn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Yn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[2]
def ynp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Yn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[3]
def y0_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z0n : ndarray
Location of nth zero of Y0(z)
y0pz0n : ndarray
Value of derivative Y0'(z0) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1n : ndarray
Location of nth zero of Y1(z)
y1pz1n : ndarray
Value of derivative Y1'(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1p_zeros(nt, complex=False):
"""Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
The values are given by Y1(z1) at each z1 where Y1'(z1)=0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1pn : ndarray
Location of nth zero of Y1'(z)
y1z1pn : ndarray
Value of derivative Y1(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = not complex
return specfun.cyzo(nt, kf, kc)
def _bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1
# L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
v = asarray(v)
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
p = phase * (p * (n-i+1)) / i # = choose(k, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
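# Editorial note (sketch): for n=1 and phase=-1 the loop above reduces to the
# familiar first-derivative relation
#     L'(v, z) = (L(v - 1, z) - L(v + 1, z)) / 2,
# i.e. the n = 1 case of the DLMF 10.6.7 formula cited in the docstrings
# below; phase=+1 gives the analogous plus-sign relation used for the
# modified Bessel functions.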
bessel_diff_formula = np.deprecate(_bessel_diff_formula,
message="bessel_diff_formula is a private function, do not use it!")
def jvp(v, z, n=1):
"""Compute nth derivative of Bessel function Jv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return jv(v, z)
else:
return _bessel_diff_formula(v, z, n, jv, -1)
def yvp(v, z, n=1):
"""Compute nth derivative of Bessel function Yv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return yv(v, z)
else:
return _bessel_diff_formula(v, z, n, yv, -1)
def kvp(v, z, n=1):
"""Compute nth derivative of real-order modified Bessel function Kv(z)
Kv(z) is the modified Bessel function of the second kind.
Derivative is calculated with respect to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int
Order of derivative. Default is first derivative.
Returns
-------
out : ndarray
The results
Examples
--------
Calculate multiple values at order 5:
>>> from scipy.special import kvp
>>> kvp(5, (1, 2, 3+5j))
array([-1849.0354+0.j , -25.7735+0.j , -0.0307+0.0875j])
Calculate for a single value at multiple orders:
>>> kvp((4, 4.5, 5), 1)
array([ -184.0309, -568.9585, -1849.0354])
Notes
-----
    The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.29.E5
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return kv(v, z)
else:
return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
def ivp(v, z, n=1):
"""Compute nth derivative of modified Bessel function Iv(z) with respect
to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.29.E5
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return iv(v, z)
else:
return _bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v, z, n=1):
"""Compute nth derivative of Hankel function H1v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel1(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel1, -1)
def h2vp(v, z, n=1):
"""Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel2(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel2, -1)
@np.deprecate(message="scipy.special.sph_jn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_jn instead. "
"Note that the new function has a different signature.")
def sph_jn(n, z):
"""Compute spherical Bessel function jn(z) and derivative.
This function computes the value and first derivative of jn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of jn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
See also
--------
spherical_jn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)]
@np.deprecate(message="scipy.special.sph_yn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_yn instead. "
"Note that the new function has a different signature.")
def sph_yn(n, z):
"""Compute spherical Bessel function yn(z) and derivative.
This function computes the value and first derivative of yn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of yn to compute
z : complex
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
See also
--------
spherical_yn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
return yn[:(n+1)], ynp[:(n+1)]
@np.deprecate(message="scipy.special.sph_jnyn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_jn and "
"scipy.special.spherical_yn instead. "
"Note that the new function has a different signature.")
def sph_jnyn(n, z):
"""Compute spherical Bessel functions jn(z) and yn(z) and derivatives.
This function computes the value and first derivative of jn(z) and yn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of jn and yn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
See also
--------
spherical_jn
spherical_yn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)], yn[:(n+1)], ynp[:(n+1)]
@np.deprecate(message="scipy.special.sph_in is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_in instead. "
"Note that the new function has a different signature.")
def sph_in(n, z):
"""Compute spherical Bessel function in(z) and derivative.
This function computes the value and first derivative of in(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of in to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
See also
--------
spherical_in
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
return In[:(n+1)], Inp[:(n+1)]
@np.deprecate(message="scipy.special.sph_kn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_kn instead. "
"Note that the new function has a different signature.")
def sph_kn(n, z):
"""Compute spherical Bessel function kn(z) and derivative.
This function computes the value and first derivative of kn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of kn to compute
z : complex
Argument at which to evaluate
Returns
-------
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
See also
--------
spherical_kn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, kn, knp = specfun.sphk(n1, z)
return kn[:(n+1)], knp[:(n+1)]
@np.deprecate(message="scipy.special.sph_inkn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_in and "
"scipy.special.spherical_kn instead. "
"Note that the new function has a different signature.")
def sph_inkn(n, z):
"""Compute spherical Bessel functions in(z), kn(z), and derivatives.
This function computes the value and first derivative of in(z) and kn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of in and kn to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
See also
--------
spherical_in
spherical_kn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
nm, kn, knp = specfun.sphk(n1, z)
return In[:(n+1)], Inp[:(n+1)], kn[:(n+1)], knp[:(n+1)]
def riccati_jn(n, x):
r"""Compute Ricatti-Bessel function of the first kind and its derivative.
The Ricatti-Bessel function of the first kind is defined as :math:`x
j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first
kind of order :math:`n`.
This function computes the value and first derivative of the
Ricatti-Bessel function for all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(x), ..., jn(x)
jnp : ndarray
First derivative j0'(x), ..., jn'(x)
Notes
-----
The computation is carried out via backward recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.51.E1
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rctj(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
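# Illustrative sketch (not part of the original module): by definition the
# values returned by riccati_jn should equal x * j_k(x) for k = 0..n, where
# j_k is the spherical Bessel function of the first kind.  The helper name is
# an assumption.
def _riccati_jn_consistency(n=4, x=2.5):
    from scipy.special import spherical_jn
    vals, _ = riccati_jn(n, x)
    k = np.arange(n + 1)
    return np.max(np.abs(vals - x * spherical_jn(k, x)))  # expected ~1e-12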
def riccati_yn(n, x):
"""Compute Ricatti-Bessel function of the second kind and its derivative.
The Ricatti-Bessel function of the second kind is defined as :math:`x
y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second
kind of order :math:`n`.
This function computes the value and first derivative of the function for
all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(x), ..., yn(x)
ynp : ndarray
First derivative y0'(x), ..., yn'(x)
Notes
-----
The computation is carried out via ascending recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.51.E1
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rcty(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def erfinv(y):
"""Inverse function for erf.
"""
return ndtri((y+1)/2.0)/sqrt(2)
def erfcinv(y):
"""Inverse function for erfc.
"""
return -ndtri(0.5*y)/sqrt(2)
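# Illustrative sketch (not part of the original module): since erfinv and
# erfcinv are built from ndtri above, they should round-trip through erf and
# erfc.  The helper name is an assumption.
def _erf_inverse_roundtrip(y=0.3):
    from scipy.special import erf, erfc
    return abs(erf(erfinv(y)) - y), abs(erfc(erfcinv(y)) - y)  # both ~1e-16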
def erf_zeros(nt):
"""Compute nt complex zeros of error function erf(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.cerzo(nt)
def fresnelc_zeros(nt):
"""Compute nt complex zeros of cosine Fresnel integral C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(1, nt)
def fresnels_zeros(nt):
"""Compute nt complex zeros of sine Fresnel integral S(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt)
def fresnel_zeros(nt):
"""Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt), specfun.fcszo(1, nt)
def assoc_laguerre(x, n, k=0.0):
"""Compute the generalized (associated) Laguerre polynomial of degree n and order k.
The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``,
    with weighting function ``exp(-x) * x**k``, where ``k > -1``.
Notes
-----
`assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
"""
return orthogonal.eval_genlaguerre(n, k, x)
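# Illustrative sketch (not part of the original module): with the default
# k = 0 the generalized Laguerre polynomial reduces to the ordinary Laguerre
# polynomial, so assoc_laguerre(x, n) should match eval_laguerre(n, x).  The
# helper name is an assumption.
def _assoc_laguerre_reduces_to_laguerre(n=3, x=1.7):
    from scipy.special import eval_laguerre
    return abs(assoc_laguerre(x, n) - eval_laguerre(n, x))  # expected ~1e-15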
digamma = psi
def polygamma(n, x):
"""Polygamma function n.
This is the nth derivative of the digamma (psi) function.
Parameters
----------
n : array_like of int
The order of the derivative of `psi`.
x : array_like
Where to evaluate the polygamma function.
Returns
-------
polygamma : ndarray
The result.
Examples
--------
>>> from scipy import special
>>> x = [2, 3, 25.5]
>>> special.polygamma(1, x)
array([ 0.64493407, 0.39493407, 0.03999467])
>>> special.polygamma(0, x) == special.psi(x)
array([ True, True, True], dtype=bool)
"""
n, x = asarray(n), asarray(x)
fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
return where(n == 0, psi(x), fac2)
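# Illustrative sketch (not part of the original module): a quick sanity check
# of the zeta-based formula above is the classical value of the trigamma
# function at 1, psi'(1) = zeta(2) = pi**2 / 6.  The helper name is an
# assumption.
def _trigamma_at_one_check():
    return abs(polygamma(1, 1) - np.pi**2 / 6)  # expected ~1e-16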
def mathieu_even_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the even solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz
.. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z
This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Ak : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/28.4#i
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
        raise ValueError("q must be non-negative.")
if (m != floor(m)) or (m < 0):
raise ValueError("m must be an integer >=0.")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 1
m = int(floor(m))
if m % 2:
kd = 2
a = mathieu_a(m, q)
fc = specfun.fcoef(kd, m, q, a)
return fc[:km]
def mathieu_odd_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the odd solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z
.. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z
This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd
input m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Bk : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
        raise ValueError("q must be non-negative.")
if (m != floor(m)) or (m <= 0):
raise ValueError("m must be an integer > 0")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 4
m = int(floor(m))
if m % 2:
kd = 3
b = mathieu_b(m, q)
fc = specfun.fcoef(kd, m, q, b)
return fc[:km]
def lpmn(m, n, z):
"""Sequence of associated Legendre functions of the first kind.
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
This function takes a real argument ``z``. For complex arguments ``z``
use clpmn instead.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float
Input value.
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
See Also
--------
clpmn: associated Legendre functions of the first kind for complex z
Notes
-----
In the interval (-1, 1), Ferrer's function of the first kind is
returned. The phase convention used for the intervals (1, inf)
and (-inf, -1) is such that the result is always real.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.3
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if iscomplex(z):
raise ValueError("Argument must be real. Use clpmn instead.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
with ufuncs.errstate(all='ignore'):
if abs(z) < 1:
# Ferrer function; DLMF 14.9.3
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
# Match to clpmn; DLMF 14.9.13
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
else:
mp = m
p, pd = specfun.lpmn(mp, n, z)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
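# Illustrative sketch (not part of the original module): on (-1, 1) lpmn
# returns Ferrer's function, which for m = n = 1 is P_1^1(x) = -sqrt(1 - x**2)
# (Condon-Shortley phase included).  The helper name is an assumption.
def _lpmn_p11_check(x=0.5):
    p, pd = lpmn(1, 1, x)
    return abs(p[1, 1] + sqrt(1 - x**2))  # expected ~1e-16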
def clpmn(m, n, z, type=3):
"""Associated Legendre function of the first kind for complex arguments.
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float or complex
Input value.
type : int, optional
takes values 2 or 3
2: cut on the real axis ``|x| > 1``
3: cut on the real axis ``-1 < x < 1`` (default)
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders ``0..m`` and degrees ``0..n``
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders ``0..m`` and degrees ``0..n``
See Also
--------
lpmn: associated Legendre functions of the first kind for real z
Notes
-----
By default, i.e. for ``type=3``, phase conventions are chosen according
to [1]_ such that the function is analytic. The cut lies on the interval
(-1, 1). Approaching the cut from above or below in general yields a phase
factor with respect to Ferrer's function of the first kind
(cf. `lpmn`).
For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
on the interval (-1, 1) in the complex plane yields Ferrer's function
of the first kind.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.21
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if not(type == 2 or type == 3):
raise ValueError("type must be either 2 or 3.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
with ufuncs.errstate(all='ignore'):
if type == 2:
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
else:
mp = m
p, pd = specfun.clpmn(mp, n, real(z), imag(z), type)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
def lqmn(m, n, z):
"""Sequence of associated Legendre functions of the second kind.
Computes the associated Legendre function of the second kind of order m and
degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and
``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : complex
Input value.
Returns
-------
Qmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Qmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(m) or (m < 0):
raise ValueError("m must be a non-negative integer.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
m = int(m)
n = int(n)
# Ensure neither m nor n == 0
mm = max(1, m)
nn = max(1, n)
if iscomplex(z):
q, qd = specfun.clqmn(mm, nn, z)
else:
q, qd = specfun.lqmn(mm, nn, z)
return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
def bernoulli(n):
"""Bernoulli numbers B0..Bn (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.bernob(int(n1))[:(n+1)]
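# Illustrative sketch (not part of the original module): the first few
# Bernoulli numbers are B0 = 1, B1 = -1/2, B2 = 1/6, B3 = 0, which makes a
# quick spot check of the wrapped Fortran routine.  The helper name is an
# assumption.
def _bernoulli_spot_check():
    return bernoulli(3)  # expected approximately [1.0, -0.5, 0.16666667, 0.0]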
def euler(n):
"""Euler numbers E0..En (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.eulerb(n1)[:(n+1)]
def lpn(n, z):
"""Legendre function of the first kind.
Compute sequence of Legendre functions of the first kind (polynomials),
Pn(z) and derivatives for all degrees from 0 to n (inclusive).
See also special.legendre for polynomial class.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
pn, pd = specfun.clpn(n1, z)
else:
pn, pd = specfun.lpn(n1, z)
return pn[:(n+1)], pd[:(n+1)]
def lqn(n, z):
"""Legendre function of the second kind.
Compute sequence of Legendre functions of the second kind, Qn(z) and
derivatives for all degrees from 0 to n (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
qn, qd = specfun.clqn(n1, z)
else:
qn, qd = specfun.lqnb(n1, z)
return qn[:(n+1)], qd[:(n+1)]
def ai_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Ai and its derivative.
Computes the first `nt` zeros, `a`, of the Airy function Ai(x);
first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x);
the corresponding values Ai(a');
and the corresponding values Ai'(a).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
a : ndarray
First `nt` zeros of Ai(x)
ap : ndarray
First `nt` zeros of Ai'(x)
ai : ndarray
Values of Ai(x) evaluated at first `nt` zeros of Ai'(x)
aip : ndarray
Values of Ai'(x) evaluated at first `nt` zeros of Ai(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 1
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def bi_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Bi and its derivative.
Computes the first `nt` zeros, b, of the Airy function Bi(x);
first `nt` zeros, b', of the derivative of the Airy function Bi'(x);
the corresponding values Bi(b');
and the corresponding values Bi'(b).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
b : ndarray
First `nt` zeros of Bi(x)
bp : ndarray
First `nt` zeros of Bi'(x)
bi : ndarray
Values of Bi(x) evaluated at first `nt` zeros of Bi'(x)
bip : ndarray
Values of Bi'(x) evaluated at first `nt` zeros of Bi(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 2
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def lmbda(v, x):
r"""Jahnke-Emden Lambda function, Lambdav(x).
This function is defined as [2]_,
.. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v},
where :math:`\Gamma` is the gamma function and :math:`J_v` is the
Bessel function of the first kind.
Parameters
----------
v : float
Order of the Lambda function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
vl : ndarray
Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dl : ndarray
Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and
Curves" (4th ed.), Dover, 1945
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (v < 0):
        raise ValueError("v must be non-negative.")
n = int(v)
v0 = v - n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
if (v != floor(v)):
vm, vl, dl = specfun.lamv(v1, x)
else:
vm, vl, dl = specfun.lamn(v1, x)
return vl[:(n+1)], dl[:(n+1)]
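# Illustrative sketch (not part of the original module): the last entry of
# lmbda(v, x)[0] is Lambda_v(x) itself, so it can be compared with the direct
# definition gamma(v + 1) * J_v(x) / (x / 2)**v.  The helper name is an
# assumption.
def _lmbda_definition_check(v=1.5, x=2.0):
    from scipy.special import jv
    vl, dl = lmbda(v, x)
    direct = gamma(v + 1) * jv(v, x) / (x / 2.0)**v
    return abs(vl[-1] - direct)  # expected to be small (~1e-12)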
def pbdv_seq(v, x):
"""Parabolic cylinder functions Dv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbdv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbvv_seq(v, x):
"""Parabolic cylinder functions Vv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n <= 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbvv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbdn_seq(n, z):
"""Parabolic cylinder functions Dn(z) and derivatives.
Parameters
----------
n : int
Order of the parabolic cylinder function
z : complex
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_i(z), for i=0, ..., i=n.
dp : ndarray
Derivatives D_i'(z), for i=0, ..., i=n.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (floor(n) != n):
raise ValueError("n must be an integer.")
if (abs(n) <= 1):
n1 = 1
else:
n1 = n
cpb, cpd = specfun.cpbdn(n1, z)
return cpb[:n1+1], cpd[:n1+1]
def ber_zeros(nt):
"""Compute nt zeros of the Kelvin function ber(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 1)
def bei_zeros(nt):
"""Compute nt zeros of the Kelvin function bei(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 2)
def ker_zeros(nt):
"""Compute nt zeros of the Kelvin function ker(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 3)
def kei_zeros(nt):
"""Compute nt zeros of the Kelvin function kei(x).
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 4)
def berp_zeros(nt):
"""Compute nt zeros of the Kelvin function ber'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 5)
def beip_zeros(nt):
"""Compute nt zeros of the Kelvin function bei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 6)
def kerp_zeros(nt):
"""Compute nt zeros of the Kelvin function ker'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 7)
def keip_zeros(nt):
"""Compute nt zeros of the Kelvin function kei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 8)
def kelvin_zeros(nt):
"""Compute nt zeros of all Kelvin functions.
Returned in a length-8 tuple of arrays of length nt. The tuple contains
the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei').
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return (specfun.klvnzo(nt, 1),
specfun.klvnzo(nt, 2),
specfun.klvnzo(nt, 3),
specfun.klvnzo(nt, 4),
specfun.klvnzo(nt, 5),
specfun.klvnzo(nt, 6),
specfun.klvnzo(nt, 7),
specfun.klvnzo(nt, 8))
def pro_cv_seq(m, n, c):
"""Characteristic values for prolate spheroidal wave functions.
Compute a sequence of characteristic values for the prolate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, 1)[1][:maxL]
def obl_cv_seq(m, n, c):
"""Characteristic values for oblate spheroidal wave functions.
Compute a sequence of characteristic values for the oblate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, -1)[1][:maxL]
def ellipk(m):
"""Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`, which this
function calls.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
"""
return ellipkm1(1 - asarray(m))
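# Illustrative sketch (not part of the original module): two easy checks of
# ellipk are K(0) = pi/2 and the forwarding relation to ellipkm1 used above.
# The helper name is an assumption.
def _ellipk_at_zero_check():
    return abs(ellipk(0.0) - pi / 2)  # expected ~1e-16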
def agm(a, b):
"""Arithmetic, Geometric Mean.
    Start with a_0 = a and b_0 = b and iteratively compute::

        a_{n+1} = (a_n + b_n)/2
        b_{n+1} = sqrt(a_n * b_n)

    until a_n == b_n.  The common limit is agm(a, b).  It satisfies::

        agm(a, b) = agm(b, a)
        agm(a, a) = a
        min(a, b) < agm(a, b) < max(a, b)
"""
s = a + b + 0.0
return (pi / 4) * s / ellipkm1(4 * a * b / s ** 2)
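# Illustrative sketch (not part of the original module): for positive a and b
# the same value can be obtained by iterating the two means directly, which
# cross-checks the closed form via ellipkm1 used above.  The helper name and
# tolerance are assumptions.  For example, _agm_by_iteration(24, 6) and
# agm(24, 6) both give approximately 13.4581714817256.
def _agm_by_iteration(a, b, tol=1e-15):
    a, b = float(a), float(b)
    while abs(a - b) > tol * max(abs(a), abs(b)):
        a, b = 0.5 * (a + b), sqrt(a * b)
    return a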
def comb(N, k, exact=False, repetition=False):
"""The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
repetition : bool, optional
If `repetition` is True, then the number of combinations with
repetition is computed.
Returns
-------
val : int, ndarray
The total number of combinations.
See Also
--------
binom : Binomial coefficient ufunc
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import comb
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> comb(n, k, exact=False)
array([ 120., 210.])
>>> comb(10, 3, exact=True)
120L
>>> comb(10, 3, exact=True, repetition=True)
220L
"""
if repetition:
return comb(N + k - 1, k, exact)
if exact:
return _comb_int(N, k)
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = binom(N, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def perm(N, k, exact=False):
"""Permutations of N things taken k at a time, i.e., k-permutations of N.
It's also known as "partial permutations".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, ndarray
The number of k-permutations of N.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import perm
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> perm(n, k)
array([ 720., 5040.])
>>> perm(10, 3, exact=True)
720
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for i in xrange(N - k + 1, N + 1):
val *= i
return val
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = poch(N - k + 1, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
# http://stackoverflow.com/a/16327037/125507
def _range_prod(lo, hi):
"""
Product of a range of numbers.
Returns the product of
lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi
= hi! / (lo-1)!
Breaks into smaller products first for speed:
_range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))
"""
if lo + 1 < hi:
mid = (hi + lo) // 2
return _range_prod(lo, mid) * _range_prod(mid + 1, hi)
if lo == hi:
return lo
return lo * hi
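# Illustrative sketch (not part of the original module): _range_prod(2, k)
# equals k!, so it can be checked against math.factorial.  The helper name is
# an assumption.
def _range_prod_check(k=9):
    return _range_prod(2, k) == math.factorial(k)  # expected True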
def factorial(n, exact=False):
"""
The factorial of a number or array of numbers.
The factorial of non-negative integer `n` is the product of all
positive integers less than or equal to `n`::
n! = n * (n - 1) * (n - 2) * ... * 1
Parameters
----------
n : int or array_like of ints
Input values. If ``n < 0``, the return value is 0.
exact : bool, optional
If True, calculate the answer exactly using long integer arithmetic.
If False, result is approximated in floating point rapidly using the
`gamma` function.
Default is False.
Returns
-------
nf : float or int or ndarray
Factorial of `n`, as integer or float depending on `exact`.
Notes
-----
For arrays with ``exact=True``, the factorial is computed only once, for
the largest input, with each other result computed in the process.
The output dtype is increased to ``int64`` or ``object`` if necessary.
With ``exact=False`` the factorial is approximated using the gamma
function:
.. math:: n! = \\Gamma(n+1)
Examples
--------
>>> from scipy.special import factorial
>>> arr = np.array([3, 4, 5])
>>> factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> factorial(arr, exact=True)
array([ 6, 24, 120])
>>> factorial(5, exact=True)
120L
"""
if exact:
if np.ndim(n) == 0:
return 0 if n < 0 else math.factorial(n)
else:
n = asarray(n)
un = np.unique(n).astype(object)
# Convert to object array of long ints if np.int can't handle size
if un[-1] > 20:
dt = object
elif un[-1] > 12:
dt = np.int64
else:
dt = np.int
out = np.empty_like(n, dtype=dt)
# Handle invalid/trivial values
un = un[un > 1]
out[n < 2] = 1
out[n < 0] = 0
# Calculate products of each range of numbers
if un.size:
val = math.factorial(un[0])
out[n == un[0]] = val
for i in xrange(len(un) - 1):
prev = un[i] + 1
current = un[i + 1]
val *= _range_prod(prev, current)
out[n == current] = val
return out
else:
n = asarray(n)
vals = gamma(n + 1)
return where(n >= 0, vals, 0)
def factorial2(n, exact=False):
"""Double factorial.
This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5
* 3 * 1``. It can be approximated numerically as::
        n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
            = 2**(n/2) * (n/2)!                           n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> from scipy.special import factorial2
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0
if n <= 0:
return 1
val = 1
for k in xrange(n, 0, -2):
val *= k
return val
else:
n = asarray(n)
vals = zeros(n.shape, 'd')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1, n)
evenn = extract(cond2, n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))
place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e))
return vals
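# Illustrative sketch (not part of the original module): the gamma-based
# branch above should agree with the exact integer double factorial, e.g.
# 9!! = 9*7*5*3*1 = 945.  The helper name is an assumption.
def _factorial2_consistency(n=9):
    exact = factorial2(n, exact=True)
    approx = float(factorial2(n, exact=False))
    return abs(approx - exact) / exact  # expected ~1e-15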
def factorialk(n, k, exact=True):
"""Multifactorial of n of order k, n(!!...!).
This is the multifactorial of n skipping k values. For example,
factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1
In particular, for any integer ``n``, we have
factorialk(n, 1) = factorial(n)
factorialk(n, 2) = factorial2(n)
Parameters
----------
n : int
Calculate multifactorial. If `n` < 0, the return value is 0.
k : int
Order of multifactorial.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multifactorial of `n`.
Raises
------
NotImplementedError
        Raised when `exact` is False; only exact integer arithmetic is implemented.
Examples
--------
>>> from scipy.special import factorialk
>>> factorialk(5, 1, exact=True)
120L
>>> factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0
if n <= 0:
return 1
val = 1
for j in xrange(n, 0, -k):
val = val*j
return val
else:
raise NotImplementedError
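# Illustrative sketch (not part of the original module): the multifactorial
# skips k values at a time, e.g. factorialk(17, 4) = 17*13*9*5*1 = 9945.  The
# helper name is an assumption.
def _factorialk_example():
    return factorialk(17, 4) == 17 * 13 * 9 * 5 * 1  # expected True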
def zeta(x, q=None, out=None):
r"""
Riemann zeta function.
The two-argument version is the Hurwitz zeta function:
.. math:: \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x},
    The Riemann zeta function corresponds to the special case ``q = 1``.
See also
--------
zetac
"""
if q is None:
q = 1
return _zeta(x, q, out)
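# Illustrative sketch (not part of the original module): with the default
# q = 1 the function reduces to the Riemann zeta function, e.g. the Basel
# value zeta(2) = pi**2 / 6.  The helper name is an assumption.
def _zeta_basel_check():
    return abs(zeta(2.0) - pi**2 / 6)  # expected ~1e-16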
|
bsd-3-clause
|
btabibian/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
17
|
64758
|
"""
Testing for the tree module (sklearn.tree).
"""
import copy
import pickle
from functools import partial
from itertools import product
import struct
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree._tree import TREE_LEAF
from sklearn.tree.tree import CRITERIA_CLF
from sklearn.tree.tree import CRITERIA_REG
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", "mae", "friedman_mse")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
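# Illustrative sketch (not part of the original test module): assert_tree_equal
# is typically used to check that a tree grown on dense input matches one
# grown on the equivalent sparse input.  The helper name below is an
# assumption, not an sklearn API.
def _example_dense_vs_sparse_tree_equality():
    data = DATASETS["clf_small"]
    d = DecisionTreeClassifier(random_state=0).fit(data["X"], data["y"])
    s = DecisionTreeClassifier(random_state=0).fit(data["X_sparse"], data["y"])
    assert_tree_equal(d.tree_, s.tree_, "dense vs. sparse on clf_small")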
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=3.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=2.5).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_impurity_split=-1.0).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_impurity_decrease=-1.0).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
    # Test that every leaf contains at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
# test case with no weights passed in
total_weight = X.shape[0]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def check_min_weight_fraction_leaf_with_min_samples_leaf(name, datasets,
sparse=False):
"""Test the interaction between min_weight_fraction_leaf and min_samples_leaf
when sample_weights is not provided in fit."""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
total_weight = X.shape[0]
TreeEstimator = ALL_TREES[name]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test integer min_samples_leaf
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=5,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
max((total_weight *
est.min_weight_fraction_leaf), 5),
"Failed with {0} "
"min_weight_fraction_leaf={1}, "
"min_samples_leaf={2}".format(name,
est.min_weight_fraction_leaf,
est.min_samples_leaf))
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test float min_samples_leaf
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=.1,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
max((total_weight * est.min_weight_fraction_leaf),
(total_weight * est.min_samples_leaf)),
"Failed with {0} "
"min_weight_fraction_leaf={1}, "
"min_samples_leaf={2}".format(name,
est.min_weight_fraction_leaf,
est.min_samples_leaf))
def test_min_weight_fraction_leaf_with_min_samples_leaf():
# Check on dense input
for name in ALL_TREES:
yield (check_min_weight_fraction_leaf_with_min_samples_leaf,
name, "iris")
# Check on sparse input
for name in SPARSE_TREES:
yield (check_min_weight_fraction_leaf_with_min_samples_leaf,
name, "multilabel", True)
def test_min_impurity_split():
# test if min_impurity_split creates leaves with impurity
# [0, min_impurity_split) when min_samples_leaf = 1 and
# min_samples_split = 2.
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
min_impurity_split = .5
        # verify that leaf nodes have zero impurity when min_impurity_split
        # is left at its default
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
random_state=0)
assert_true(est.min_impurity_split is None,
"Failed, min_impurity_split = {0} > 1e-7".format(
est.min_impurity_split))
try:
assert_warns(DeprecationWarning, est.fit, X, y)
except AssertionError:
pass
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_equal(est.tree_.impurity[node], 0.,
"Failed with {0} "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
# verify leaf nodes have impurity [0,min_impurity_split] when using
# min_impurity_split
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=0)
assert_warns_message(DeprecationWarning,
"Use the min_impurity_decrease",
est.fit, X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_greater_equal(est.tree_.impurity[node], 0,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
assert_less_equal(est.tree_.impurity[node], min_impurity_split,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
def test_min_impurity_decrease():
    # test that min_impurity_decrease ensures a split is made only if
    # the impurity decrease is at least that value
X, y = datasets.make_classification(n_samples=10000, random_state=42)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# Check default value of min_impurity_decrease, 1e-7
est1 = TreeEstimator(max_leaf_nodes=max_leaf_nodes, random_state=0)
# Check with explicit value of 0.05
est2 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.05, random_state=0)
# Check with a much lower value of 0.0001
est3 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.0001, random_state=0)
        # Check with a higher value of 0.1
est4 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.1, random_state=0)
for est, expected_decrease in ((est1, 1e-7), (est2, 0.05),
(est3, 0.0001), (est4, 0.1)):
assert_less_equal(est.min_impurity_decrease, expected_decrease,
"Failed, min_impurity_decrease = {0} > {1}"
.format(est.min_impurity_decrease,
expected_decrease))
est.fit(X, y)
for node in range(est.tree_.node_count):
                # If the current node is not a leaf node, check that the split
                # was justified w.r.t. the min_impurity_decrease
if est.tree_.children_left[node] != TREE_LEAF:
imp_parent = est.tree_.impurity[node]
wtd_n_node = est.tree_.weighted_n_node_samples[node]
left = est.tree_.children_left[node]
wtd_n_left = est.tree_.weighted_n_node_samples[left]
imp_left = est.tree_.impurity[left]
wtd_imp_left = wtd_n_left * imp_left
right = est.tree_.children_right[node]
wtd_n_right = est.tree_.weighted_n_node_samples[right]
imp_right = est.tree_.impurity[right]
wtd_imp_right = wtd_n_right * imp_right
wtd_avg_left_right_imp = wtd_imp_right + wtd_imp_left
wtd_avg_left_right_imp /= wtd_n_node
fractional_node_weight = (
est.tree_.weighted_n_node_samples[node] / X.shape[0])
actual_decrease = fractional_node_weight * (
imp_parent - wtd_avg_left_right_imp)
assert_greater_equal(actual_decrease, expected_decrease,
"Failed with {0} "
"expected min_impurity_decrease={1}"
.format(actual_decrease,
expected_decrease))
def test_pickle():
    for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
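        # Record a few fitted tree attributes so they can be compared after unpickling.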
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test that greedy trees grown with max_leaf_nodes = k + 1 have exactly k + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_behaviour_constant_feature_after_splits():
X = np.transpose(np.vstack(([[0, 0, 0, 0, 0, 1, 2, 4, 5, 6, 7]],
np.zeros((4, 11)))))
y = [0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3]
for name, TreeEstimator in ALL_TREES.items():
# do not check extra random trees
if "ExtraTree" not in name:
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 2)
assert_equal(est.tree_.node_count, 5)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = 8 * struct.calcsize("P")
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree_type, dataset in product(SPARSE_TREES, ("clf_small", "toy",
"digits", "multilabel",
"sparse-pos",
"sparse-neg",
"sparse-mix", "zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree_type, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
for tree_type, dataset in product(SPARSE_TREES, ["boston", "reg_small"]):
if tree_type in REG_TREES:
yield (check_sparse_input, tree_type, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree_type, dataset in product(SPARSE_TREES, ["sparse-pos",
"sparse-neg",
"sparse-mix", "zeros"]):
yield (check_sparse_parameters, tree_type, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree_type, dataset in product(SPARSE_TREES, ["sparse-pos",
"sparse-neg",
"sparse-mix", "zeros"]):
yield (check_sparse_criterion, tree_type, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
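    # Build the sparse matrix column by column: each column stores a random
    # subset of rows with values drawn from {-1, 0, 1, 2}, so some stored
    # entries are explicit zeros.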
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree_type in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree_type)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
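    # With max_depth=1 the tree has three nodes (a root and two leaves), so
    # each sample's decision path visits the root plus exactly one leaf.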
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
    # Ensure only one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
def test_mae():
# check MAE criterion produces correct results
# on small toy dataset
dt_mae = DecisionTreeRegressor(random_state=0, criterion="mae",
max_leaf_nodes=2)
dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3])
assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0/3.0])
assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3],
[0.6, 0.3, 0.1, 1.0, 0.3])
assert_array_equal(dt_mae.tree_.impurity, [7.0/2.3, 3.0/0.7, 4.0/1.6])
assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0])
def test_criterion_copy():
# Let's check whether copy of our criterion has the same type
# and properties as original
n_outputs = 3
n_classes = np.arange(3, dtype=np.intp)
n_samples = 100
def _pickle_copy(obj):
return pickle.loads(pickle.dumps(obj))
for copy_func in [copy.copy, copy.deepcopy, _pickle_copy]:
for _, typename in CRITERIA_CLF.items():
criteria = typename(n_outputs, n_classes)
result = copy_func(criteria).__reduce__()
typename_, (n_outputs_, n_classes_), _ = result
assert_equal(typename, typename_)
assert_equal(n_outputs, n_outputs_)
assert_array_equal(n_classes, n_classes_)
for _, typename in CRITERIA_REG.items():
criteria = typename(n_outputs, n_samples)
result = copy_func(criteria).__reduce__()
typename_, (n_outputs_, n_samples_), _ = result
assert_equal(typename, typename_)
assert_equal(n_outputs, n_outputs_)
assert_equal(n_samples, n_samples_)
|
bsd-3-clause
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/pandas/core/dtypes/inference.py
|
3
|
8373
|
""" basic inference routines """
import collections
import re
import numpy as np
from numbers import Number
from pandas.compat import (PY2, string_types, text_type,
string_and_binary_types)
from pandas._libs import lib
is_bool = lib.is_bool
is_integer = lib.is_integer
is_float = lib.is_float
is_complex = lib.is_complex
is_scalar = lib.isscalar
is_decimal = lib.is_decimal
is_interval = lib.is_interval
def is_number(obj):
"""
Check if the object is a number.
Parameters
----------
obj : The object to check.
Returns
-------
is_number : bool
Whether `obj` is a number or not.
Examples
--------
>>> is_number(1)
True
>>> is_number("foo")
False
"""
return isinstance(obj, (Number, np.number))
def is_string_like(obj):
"""
Check if the object is a string.
Parameters
----------
obj : The object to check.
Examples
--------
>>> is_string_like("foo")
True
>>> is_string_like(1)
False
Returns
-------
is_str_like : bool
Whether `obj` is a string or not.
"""
return isinstance(obj, (text_type, string_types))
def _iterable_not_string(obj):
"""
Check if the object is an iterable but not a string.
Parameters
----------
obj : The object to check.
Returns
-------
is_iter_not_string : bool
Whether `obj` is a non-string iterable.
Examples
--------
>>> _iterable_not_string([1, 2, 3])
True
>>> _iterable_not_string("foo")
False
>>> _iterable_not_string(1)
False
"""
return (isinstance(obj, collections.Iterable) and
not isinstance(obj, string_types))
def is_iterator(obj):
"""
Check if the object is an iterator.
For example, lists are considered iterators
but not strings or datetime objects.
Parameters
----------
obj : The object to check.
Returns
-------
is_iter : bool
Whether `obj` is an iterator.
Examples
--------
>>> is_iterator([1, 2, 3])
True
>>> is_iterator(datetime(2017, 1, 1))
False
>>> is_iterator("foo")
False
>>> is_iterator(1)
False
"""
if not hasattr(obj, '__iter__'):
return False
if PY2:
return hasattr(obj, 'next')
else:
# Python 3 generators have
# __next__ instead of next
return hasattr(obj, '__next__')
def is_file_like(obj):
"""
Check if the object is a file-like object.
For objects to be considered file-like, they must
be an iterator AND have either a `read` and/or `write`
method as an attribute.
Note: file-like objects must be iterable, but
iterable objects need not be file-like.
.. versionadded:: 0.20.0
Parameters
----------
obj : The object to check.
Returns
-------
is_file_like : bool
Whether `obj` has file-like properties.
Examples
--------
    >>> buffer = StringIO("data")
>>> is_file_like(buffer)
True
>>> is_file_like([1, 2, 3])
False
"""
if not (hasattr(obj, 'read') or hasattr(obj, 'write')):
return False
if not is_iterator(obj):
return False
return True
def is_re(obj):
"""
Check if the object is a regex pattern instance.
Parameters
----------
obj : The object to check.
Returns
-------
is_regex : bool
Whether `obj` is a regex pattern.
Examples
--------
>>> is_re(re.compile(".*"))
True
>>> is_re("foo")
False
"""
return isinstance(obj, re._pattern_type)
def is_re_compilable(obj):
"""
Check if the object can be compiled into a regex pattern instance.
Parameters
----------
obj : The object to check.
Returns
-------
is_regex_compilable : bool
Whether `obj` can be compiled as a regex pattern.
Examples
--------
>>> is_re_compilable(".*")
True
>>> is_re_compilable(1)
False
"""
try:
re.compile(obj)
except TypeError:
return False
else:
return True
def is_list_like(obj):
"""
Check if the object is list-like.
Objects that are considered list-like are for example Python
lists, tuples, sets, NumPy arrays, and Pandas Series.
Strings and datetime objects, however, are not considered list-like.
Parameters
----------
obj : The object to check.
Returns
-------
is_list_like : bool
Whether `obj` has list-like properties.
Examples
--------
>>> is_list_like([1, 2, 3])
True
>>> is_list_like({1, 2, 3})
True
>>> is_list_like(datetime(2017, 1, 1))
False
>>> is_list_like("foo")
False
>>> is_list_like(1)
False
"""
return (hasattr(obj, '__iter__') and
not isinstance(obj, string_and_binary_types))
def is_nested_list_like(obj):
"""
Check if the object is list-like, and that all of its elements
are also list-like.
.. versionadded:: 0.20.0
Parameters
----------
obj : The object to check.
Returns
-------
is_list_like : bool
Whether `obj` has list-like properties.
Examples
--------
>>> is_nested_list_like([[1, 2, 3]])
True
>>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])
True
>>> is_nested_list_like(["foo"])
False
>>> is_nested_list_like([])
False
>>> is_nested_list_like([[1, 2, 3], 1])
False
Notes
-----
This won't reliably detect whether a consumable iterator (e. g.
a generator) is a nested-list-like without consuming the iterator.
To avoid consuming it, we always return False if the outer container
doesn't define `__len__`.
See Also
--------
is_list_like
"""
return (is_list_like(obj) and hasattr(obj, '__len__') and
len(obj) > 0 and all(is_list_like(item) for item in obj))
def is_dict_like(obj):
"""
Check if the object is dict-like.
Parameters
----------
obj : The object to check.
Returns
-------
is_dict_like : bool
Whether `obj` has dict-like properties.
Examples
--------
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
False
"""
return hasattr(obj, '__getitem__') and hasattr(obj, 'keys')
def is_named_tuple(obj):
"""
Check if the object is a named tuple.
Parameters
----------
obj : The object to check.
Returns
-------
is_named_tuple : bool
Whether `obj` is a named tuple.
Examples
--------
>>> Point = namedtuple("Point", ["x", "y"])
>>> p = Point(1, 2)
>>>
>>> is_named_tuple(p)
True
>>> is_named_tuple((1, 2))
False
"""
return isinstance(obj, tuple) and hasattr(obj, '_fields')
def is_hashable(obj):
"""Return True if hash(obj) will succeed, False otherwise.
Some types will pass a test against collections.Hashable but fail when they
are actually hashed with hash().
Distinguish between these and other types by trying the call to hash() and
seeing if they raise TypeError.
Examples
--------
>>> a = ([],)
>>> isinstance(a, collections.Hashable)
True
>>> is_hashable(a)
False
"""
# Unfortunately, we can't use isinstance(obj, collections.Hashable), which
# can be faster than calling hash. That is because numpy scalars on Python
# 3 fail this test.
# Reconsider this decision once this numpy bug is fixed:
# https://github.com/numpy/numpy/issues/5562
try:
hash(obj)
except TypeError:
return False
else:
return True
def is_sequence(obj):
"""
Check if the object is a sequence of objects.
String types are not included as sequences here.
Parameters
----------
obj : The object to check.
Returns
-------
is_sequence : bool
Whether `obj` is a sequence of objects.
Examples
--------
>>> l = [1, 2, 3]
>>>
>>> is_sequence(l)
True
>>> is_sequence(iter(l))
False
"""
try:
iter(obj) # Can iterate over it.
len(obj) # Has a length associated with it.
return not isinstance(obj, string_and_binary_types)
except (TypeError, AttributeError):
return False
|
mit
|
sjperkins/tensorflow
|
tensorflow/examples/tutorials/word2vec/word2vec_basic.py
|
9
|
9580
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
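  # The deque is a sliding window over the data: appending past maxlen drops the oldest word id.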
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
if data_index == len(data):
      buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run())
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
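  # Keep the L2-normalized embeddings for the t-SNE visualization below.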
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
|
apache-2.0
|
wazeerzulfikar/scikit-learn
|
sklearn/utils/tests/test_utils.py
|
8
|
9395
|
from itertools import chain
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from scipy.sparse.csgraph import laplacian
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex,
assert_greater_equal, ignore_warnings)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.arpack import eigsh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1],
replace=False, n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
# Issue:6581, n_samples can be more when replace is True (default).
assert_equal(len(resample([1, 2], n_samples=5)), 5)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
@ignore_warnings # Test deprecated backport to be removed in 0.21
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
@ignore_warnings # Test deprecated backport to be removed in 0.21
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
@ignore_warnings # Test deprecated backport to be removed in 0.21
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
@ignore_warnings # Test deprecated backport to be removed in 0.21
def test_arpack_eigsh_initialization():
# Non-regression test that shows null-space computation is better with
# initialization of eigsh from [-1,1] instead of [0,1]
random_state = check_random_state(42)
A = random_state.rand(50, 50)
A = np.dot(A.T, A) # create s.p.d. matrix
A = laplacian(A) + 1e-7 * np.identity(A.shape[0])
k = 5
# Test if eigsh is working correctly
# New initialization [-1,1] (as in original ARPACK)
# Was [0,1] before, with which this test could fail
v0 = random_state.uniform(-1, 1, A.shape[0])
w, _ = eigsh(A, k=k, sigma=0.0, v0=v0)
# Eigenvalues of s.p.d. matrix should be nonnegative, w[0] is smallest
assert_greater_equal(w[0], 0)
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes and lets any indexable data structure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in
gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
|
bsd-3-clause
|
AlbertoBonfiglio/Robotics537
|
Homework3/Homework3/classes/armedbandit.py
|
1
|
2409
|
#!/usr/bin/env python
import numpy as np
from scipy import stats
import random
import matplotlib.pyplot as plt
#matplotlib inline
class ArmedBandit(object):
n = 10
arms = np.random.rand(n)
#initialize memory array; has 1 row defaulted to random action index
av = None
def __init__(self, n=10):
self.n = n
self.arms = np.random.rand(n)
pass
def reward(self, prob):
reward = 0
for i in range(100):
if random.random() < prob:
reward += 1
return reward
#greedy method to select best arm based on memory array (historical results)
def bestArm(self, a):
try:
bestArm = 0 #just default to 0
bestMean = 0
for u in a:
avg = np.mean(a[np.where(a[:,0] == u[0])][:, 1]) #calc mean reward for each action
if bestMean < avg:
bestMean = avg
bestArm = int(u[0])
print('Best Mean :' + str(bestMean))
return bestArm
except Exception as ex:
            print(ex.args)
def performOneArmRobberyEGreedy(self, epochs=500, epsilon=0.1):
plt.xlabel("Plays")
plt.ylabel("Avg Reward")
self.epsilon = epsilon
print('starting e-Greedy run ')
        self.av = np.array([np.random.randint(0, self.n), 0]).reshape(1, 2) #av = action-value; arm index must be < n
for i in range(epochs):
if random.random() > self.epsilon: #greedy arm selection
choice = self.bestArm(self.av)
thisAV = np.array([[choice, self.reward(self.arms[choice])]])
self.av = np.concatenate((self.av, thisAV), axis=0)
else: #random arm selection
choice = np.where(self.arms == np.random.choice(self.arms))[0][0]
thisAV = np.array([[choice, self.reward(self.arms[choice])]]) #choice, reward
self.av = np.concatenate((self.av, thisAV), axis=0) #add to our action-value memory array
#calculate the percentage the correct arm is chosen (you can plot this instead of reward)
percCorrect = 100*(len(self.av[np.where(self.av[:,0] == np.argmax(self.arms))])/len(self.av))
#calculate the mean reward
runningMean = np.mean(self.av[:,1])
plt.scatter(i, runningMean)
plt.show()
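# Hedged usage sketch, not part of the original file: run the epsilon-greedy
# bandit for a modest number of epochs. Assumes an interactive matplotlib
# backend, since performOneArmRobberyEGreedy() calls plt.show() itself.
if __name__ == '__main__':
    bandit = ArmedBandit(n=10)
    bandit.performOneArmRobberyEGreedy(epochs=200, epsilon=0.1)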
|
gpl-2.0
|
zhmz90/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
|
Chapter2_MorePyMC/separation_plot.py
|
86
|
1494
|
# separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot( p, y, **kwargs ):
"""
This function creates a separation plot for logistic and probit classification.
See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
    p: The proportions/probabilities; can be an n x M matrix which represents M models.
y: the 0-1 response variables.
"""
assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
n = p.shape[0]
try:
M = p.shape[1]
    except IndexError:
p = p.reshape( n, 1 )
M = p.shape[1]
#colors = np.array( ["#fdf2db", "#e44a32"] )
colors_bmh = np.array( ["#eeeeee", "#348ABD"] )
fig = plt.figure( )#figsize = (8, 1.3*M) )
for i in range(M):
ax = fig.add_subplot(M, 1, i+1)
ix = np.argsort( p[:,i] )
#plot the different bars
bars = ax.bar( np.arange(n), np.ones(n), width=1.,
color = colors_bmh[ y[ix].astype(int) ],
edgecolor = 'none')
ax.plot( np.arange(n+1), np.append(p[ix,i], p[ix,i][-1]), "k",
linewidth = 1.,drawstyle="steps-post" )
#create expected value bar.
ax.vlines( [(1-p[ix,i]).sum()], [0], [1] )
#ax.grid(False)
#ax.axis('off')
plt.xlim( 0, n)
plt.tight_layout()
return
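# Hedged usage sketch, not part of the original module: simulate predicted
# probabilities and matching 0/1 outcomes, then draw their separation plot.
# The sample size and seed below are arbitrary illustration values.
if __name__ == "__main__":
    np.random.seed(0)
    n_obs = 200
    probs = np.random.uniform(0, 1, size=n_obs)                # model probabilities
    outcomes = (np.random.uniform(0, 1, size=n_obs) < probs)   # simulated responses
    separation_plot(probs.reshape(n_obs, 1), outcomes.astype(int))
    plt.show()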
|
mit
|
patyoon/predict_applied
|
regression_50/run_regression_journal.py
|
1
|
13532
|
from MySQLdb import connect, escape_string
from optparse import OptionParser
import sys
import pickle, os
from math import sqrt
from scipy.sparse import lil_matrix, spdiags, csr_matrix
from numpy import zeros, array, matrix
from sklearn import (cross_validation, svm,
metrics, linear_model)
from sklearn.svm.sparse import SVR
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import shuffle
import pylab as pl
import numpy as np
#from sklearn import ensemble
import datetime, random
from collections import Counter
from scipy.stats import pearsonr
JNL_LEVEL_ABSTRACT_COUNT_TABLE = 'journal_abstract_MED_jid_word_count'
JNL_LEVEL_TITLE_COUNT_TABLE = 'journal_title_MED_jid_word_count'
CPID_LEVEL_ABSTRACT_COUNT_TABLE = 'cpid_abstract_MED_jid_word_count'
CPID_LEVEL_TITLE_COUNT_TABLE = 'cpid_title_MED_jid_word_count'
CORPUS_LEVEL_ABSTRACT_COUNT_TABLE = 'all_word_abstract_MED_word_count'
CORPUS_LEVEL_TITLE_COUNT_TABLE = 'all_word_title_MED_word_count'
def run_cv(clf, X, Y):
scores = cross_validation.cross_val_score(clf, X, Y, cv=10,
score_func=metrics.mean_squared_error)
return scores
def run_confusion(clf, name, X, Y, Y_dist, sample_names, outfile, thres=0.01):
random.seed(0)
n_samples = X.get_shape()[0]
p = range(n_samples)
random.shuffle(p)
X, Y = X[p], Y[p]
half = int(n_samples / 2)
#Run classifier
sample_names = map (lambda x:sample_names[x], p)[half:]
y_ = clf.fit(X[:half], Y[:half]).predict(X[half:])
y_prob= clf.predict_proba(X[half:])
valid_idx= []
invalid_idx = []
invalid_name = []
diff = y_prob-Y_dist
for i in xrange(len(diff)):
diff_sum = 0
for x in diff[i]:
diff_sum += abs(x)
if diff_sum > thres:
valid_idx.append(i)
else:
invalid_idx.append(i)
invalid_name.append(sample_names[i])
print "too close"
print len(valid_idx), len(invalid_idx)
outfile.write("invalid\n")
for name in invalid_name:
outfile.write(name+'\n')
#Compute confusion matrix
Y_half = Y[half:]
print "Pearson correlation %f", pearsonr(Y_half[valid_idx], y_[valid_idx])
cm = metrics.confusion_matrix(Y_half[valid_idx], y_[valid_idx])
outfile.write(cm)
print cm
#Show confusion matrix
# pl.matshow(cm)
# pl.title('Confusion matrix')
# pl.colorbar()
# pl.show()
#pl.savefig(name+'png')
#pl.figure()
#pickle decorator
def pickler(func):
def inner_pickler(*args, **kwargs):
print args[2], kwargs
name = func.__name__+"_"+'_'.join(map(lambda x: "_".join(x), kwargs.items()))
if os.path.exists(name+".pkl"):
tup = pickle.load(open(name+".pkl", 'rb'))
clf = tup[0]
scores = tup[1]
else:
clf = func(*args[:2])
scores = run_cv(clf, *args[:2])
clf.fit(*args[:2])
pickle.dump((clf, scores,), open(name+".pkl", 'wb'))
run_confusion(clf, name, *args[:5])
return (scores.mean(), scores.std(),)
return inner_pickler
@pickler
def DT(X, Y):
return DecisionTreeClassifier(random_state=0)
@pickler
def multinomial_NB(X, Y):
return MultinomialNB()
@pickler
def svr(X, Y, kernel, param=None, C=1e3):
if kernel in ['rbf', 'poly']:
clf = SVR(kernel, C, param)
else:
clf = SVR(kernel, C)
return clf
@pickler
def logit(X, Y, penalty = 'l1', C=1e5):
return linear_model.LogisticRegression(penalty = penalty, C=C)
@pickler
def ridge(X, Y, alpha=1):
clf = linear_model.Ridge(alpha=1.0)
return clf
@pickler
def sgd_regressor(X,Y):
clf = linear_model.SGDRegressor()
scores = cross_validation.cross_val_score(clf, X, Y, cv=10,
score_func=metrics.mean_squared_error)
clf.fit(X,Y)
return clf
def get_index_word_dicts(cursor, feature_type, threshold=100):
if os.path.exists("word_index_"+feature_type+"_dict.pkl"):
word_index_dict = pickle.load(open("word_index_"+feature_type+"_dict.pkl", "rb"))
index_word_dict = pickle.load(open("index_word_"+feature_type+"dict.pkl", "rb"))
else:
cursor.execute('select word from '+ eval("CORPUS_LEVEL_"+feature_type.upper()+"_COUNT_TABLE") +
' where count >= '+str(threshold)+' order by count desc')
words = map (lambda x : x[0].strip().lower(), cursor.fetchall())
num_words = len(words)
i = 0
word_index_dict = {}
index_word_dict = {}
for word in words:
word_index_dict[word] = i
index_word_dict[i] = word
i+=1
pickle.dump(index_word_dict, open("index_word_"+feature_type+"_dict.pkl", "wb"))
pickle.dump(word_index_dict, open("words_index_"+feature_type+"_dict.pkl", "wb"))
return (index_word_dict, word_index_dict)
def get_level_index_dict(cursor, feature_type, level_type):
if os.path.exists("level_index_"+feature_type+"_"+level_type+"_dict.pkl"):
level_index_dict = pickle.load(open("level_index_"+feature_type+"_"+level_type+"_dict.pkl", "rb"))
else:
#count number of distinct level in cited_paper_words_count table
cursor.execute('SELECT distinct '+level_type+' FROM MED_cpid_refjnl_rlev_ct where rlev!=0 and ' + level_type+' is not null')
levels = map (lambda x : x[0], cursor.fetchall())
i = 0
level_index_dict = {}
for level in levels:
level_index_dict[level] = i
i+=1
pickle.dump(level_index_dict, open("level_index_"+feature_type+"_"+level_type+"_dict.pkl", "wb"))
print len(level_index_dict)
return level_index_dict
def get_class_dist(Y):
dist = [0]*4
for entry in Y:
dist[entry-1] += 1
sum_dist = sum(dist)
for i in xrange(len(dist)):
dist[i] = float(dist[i])/sum_dist
return np.array(dist)
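# Illustrative note (not in the original script): get_class_dist assumes four
# ordinal classes labelled 1-4. For example, get_class_dist([1, 2, 2, 4])
# returns array([0.25, 0.5, 0., 0.25]).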
def get_sparse_matrix(cursor, feature_type, level_type):
#lil sparse matrix in scipy package:
#http://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.html
#matrix is (num_level) * num_words+1 size (+1 for the last column)
if os.path.exists("X_word_by_"+level_type+"_"+feature_type+".pkl"):
(X, non_empty_sample_index, non_empty_sample) = pickle.load(open("X_word_by_"+level_type+"_"+feature_type+".pkl", "rb"))
else:
(index_word_dict, word_index_dict) = get_index_word_dicts(cursor, feature_type)
level_index_dict = get_level_index_dict(cursor, feature_type, level_type)
#dict for tracking sparse matrix index and actual cited id
i = 0
print "Reading level and words..."
level_dict = {}
X = lil_matrix((len(level_index_dict),len(word_index_dict),))
print X.get_shape()
non_empty_sample = []
non_empty_sample_index = []
for level in level_index_dict:
#print level
if i%10000 == 0:
print i, "th insert"
cursor.execute('SELECT word, count from '+ eval(level_type.upper() + '_LEVEL_'+feature_type.upper()+'_COUNT_TABLE')
+' where '+level_type+'="' + str(level)+'"')
word_count_dict = dict((word_index_dict[key],value) for key, value in
dict(cursor.fetchall()).iteritems()
if key in word_index_dict)
word_count_sum = float(sum(word_count_dict.values()))
if len(word_count_dict) >= 2:
X[[level_index_dict[level]],
word_count_dict.keys()] = map(lambda x: x/word_count_sum, word_count_dict.values())
i+=1
non_empty_sample_index.append(level_index_dict[level])
non_empty_sample.append(level)
X = X[non_empty_sample_index,:]
pickle.dump((X,non_empty_sample_index,non_empty_sample),
open("X_word_by_"+level_type+"_"+feature_type+".pkl", "wb"))
print "finished inserting count into sparse matrix"
#row standardize X as distribution
return (X, non_empty_sample_index)
def get_label_vector(cursor, feature_type, level_type):
if os.path.exists("Y_"+level_type+".pkl"):
Y = pickle.load(open("Y_"+level_type+".pkl", "rb"))
else:
level_index_dict = get_level_index_dict(cursor, feature_type, level_type)
Y = lil_matrix((len(level_index_dict),1,))
i = 0
for level in level_index_dict:
cursor.execute('SELECT distinct rlev from MED_cpid_refjnl_rlev_ct where '
+str(level_type)+'="'+str(level)+'"')
rlevl = filter(lambda x: x[0]!=0, cursor.fetchall())
if len(rlevl) != 1:
print "two rlevel : ", rlevl, level
Y[[level_index_dict[level]],[0]] = rlevl[0][0]
pickle.dump(Y, open("Y_" + level_type + ".pkl", "wb"))
return Y
def run_all_models(X, Y, Y_dist, level_names, feature_names, outfile, feature_param, level_param):
try:
XS, YS = shuffle(X, Y, random_state=13)
params = {'feature_param':feature_param, 'level_param':level_param}
#2. Run Cross Validation with SVR
#2.1 Linear Kernel
#(mean, std) = svr(XS, YS, kernel='linear')
#outfile.write( "Linear SVR MSE Score: %0.2f (+/- %0.2f)" % (mean, std/2)
#2.2 RBF Kernel with gamma 0.1
#(mean, std) = svr(XS, YS, kernel='rbf', gamma=0.1)
#outfile.write( "SVR RBF Kernel MSE Score: %0.2f (+/- %0.2f)" % (mean, std/2)
#2.2 Polynomial Kernel with degree 2
#(mean, std) = svr(XS, YS, kernel='poly', degree=2)
#outfile.write( "SVR Poly Kernel MSE Score: %0.2f (+/- %0.2f)" % (mean, std/2)
outfile.write(feature_param +"\t"+level_param+"\n")
outfile.write("num samples :"+str(X.get_shape()[0])+
"\tnum features :"+str(X.get_shape()[1])+"\n")
#3 Run Cross Validation with Logit
(mean, std) = logit(XS, YS, Y_dist, level_names, outfile, "logit", **params)
outfile.write( ("Logit MSE Score: %0.2f (+/- %0.2f)\n"
% (mean, std/2)))
# Run multinomial NB
#(mean, std) = multinomial_NB(XS, YS, "nb", **params)
#outfile.write( ("Multinomial NB MSE Score: %0.2f (+/- %0.2f)\n"
# % (mean, std/2)))
# Run SGDRegressor
#(mean, std) = sgd_regressor(XS,YS, "SGD", **params)
#outfile.write( ("SGD Regressor MSE Score: %0.2f (+/- %0.2f)\n"
# % (mean, std/2)))
#outfile.write( "Decision Tree MSE Score: %0.2f (+/- %0.2f)" % (mean, std/2)
# Run gradient boosting
#(mean, std, feature_importances) = gradient_boosting(XS, YS, feature_names)
#outfile.write( "Gradient Boosting MSE Score: %0.2f (+/- %0.2f)" % (sqrt(mean), std/2)
#plot_feature_importance(feature_importances, feature_names)
except IndexError as e:
        outfile.write(str(e) + '\n')
def plot_feature_importance(feature_importance, feature_names):
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
pl.subplot(1, 2, 2)
pl.barh(pos, feature_importance[sorted_idx], align='center')
pl.yticks(pos, feature_names[sorted_idx])
pl.xlabel('Relative Importance')
pl.title('Variable Importance')
pl.show()
def get_class_dist(Y):
dist = [0]*4
for entry in Y:
dist[int(entry)-1] += 1
sum_dist = sum(dist)
for i in xrange(len(dist)):
dist[i] = float(dist[i])/sum_dist
return np.array(dist)
if __name__ == "__main__":
usage = ("usage: %prog [options] [word_group_name]")
parser = OptionParser(usage)
(options, args) = parser.parse_args()
conn = connect(host = 'localhost', user = 'root',
db = 'shepard', passwd='shepard')
cursor = conn.cursor()
now = datetime.datetime.now()
f = open("result-"+now.strftime("%Y-%m-%d-%H:%M"), 'a')
abstract_word_index = get_index_word_dicts(cursor, 'abstract')[0]
title_word_index = get_index_word_dicts(cursor, 'title')[0]
index_jnl_dict = {v:k for k, v in get_level_index_dict(cursor, 'title', 'jnl').items()}
index_cpid_dict= {v:k for k, v in get_level_index_dict(cursor, 'title', 'cpid').items()}
(X1, non_empty) = get_sparse_matrix(cursor, 'abstract', 'jnl')
Y1 = get_label_vector(cursor, 'title', 'jnl').toarray().ravel()
Y1_dist = get_class_dist(Y1)
run_all_models(X1, Y1[non_empty], Y1_dist, index_jnl_dict,
abstract_word_index, f, 'abstract', 'jnl')
X1 = None
(X2, non_empty) = get_sparse_matrix(cursor, 'title', 'jnl')
run_all_models(X2, Y1[non_empty], Y1_dist, index_jnl_dict,
title_word_index, f, 'title', 'jnl')
X2 = None
Y1 = None
# (X3, non_empty) = get_sparse_matrix(cursor, 'title', 'cpid')
# Y2 = get_label_vector(cursor, 'title', 'cpid').toarray().ravel()
# Y2_dist = get_class_dist(Y2)
# run_all_models(X3, Y2[non_empty], Y2_dist, index_cpid_dict,
# title_word_index, f, 'title', 'cpid')
# X3 = None
# (X4, non_empty) = get_sparse_matrix(cursor, 'abstract', 'cpid')
# run_all_models(X4, Y2[non_empty], Y2_dist, index_cpid_dict,
# abstract_word_index, f, 'abstract', 'cpid')
|
gpl-2.0
|
wsnook/yawx
|
python/charts.py
|
1
|
7038
|
#!/usr/bin/env python2.7
# coding: utf-8
# This is the recommended way to configure matplotlib to use the
# non-interactive backend and avoid errors on systems that don't have X.
# See https://matplotlib.org/faq/howto_faq.html
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# On with the regular imports...
from datetime import datetime, date
from matplotlib import ticker
import sys
import os
"""Return the float value out of a field like "24.9C" or "45.5%RH"."""
def parse_float(field, units):
if field.endswith(units):
try:
return float(field[:-len(units)])
except ValueError:
return None
else:
return None
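# Illustrative note (not in the original script): parse_float("24.9C", "C")
# returns 24.9 and parse_float("45.5%RH", "%RH") returns 45.5; a field whose
# suffix does not match the expected units yields None.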
"""Return (C, %RH, hPa) from this record's BME280 line, if it has one."""
def parse_record(f):
found_BME280, bm_C, bm_RH, bm_hPa = False, None, None, None
found_Si7021, si_C, si_RH = False, None, None
# Process lines until we hit the record delimiter or end of file
line = f.readline()
while line.strip() != "%%" and line != "":
if line.startswith("BME280"):
fields = line.strip().split("|")
if len(fields) == 4:
found_BME280 = True
bm_C = parse_float(fields[1], "C")
bm_RH = parse_float(fields[2], "%RH")
bm_hPa = parse_float(fields[3], "hPa")
if line.startswith("Si7021"):
fields = line.strip().split("|")
if len(fields) == 3:
found_Si7021 = True
si_C = parse_float(fields[1], "C")
si_RH = parse_float(fields[2], "%RH")
line = f.readline()
# Prefer BME280 and fall back to Si7021 if it's not available
if found_BME280:
return (bm_C, bm_RH, bm_hPa)
elif found_Si7021:
return (si_C, si_RH, None)
else:
# Maybe this was a notes-only record
return (None, None, None)
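# Illustrative note (not in the original script): parse_record expects
# pipe-delimited sensor lines of the form inferred from the parsing above,
# for example (made-up values):
#   BME280|24.9C|45.5%RH|978.2hPa
#   Si7021|25.1C|44.8%RH
# with consecutive records separated by a line containing only "%%".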
"""Parse a sensor log and create charts for the given day."""
def parse_file(in_file, day, out_file):
date_str = day.strftime("%Y/%m/%d")
with open(in_file) as f:
# Build arrays to hold x & y points for temperature, %RH, and hPa vs. time.
temp_x, temp_y = [], []
RH_x, RH_y = [], []
hPa_x, hPa_y = [], []
line = f.readline()
while line != "":
# The first line of a record starts with the date, so skip lines
# until we find a record for the day we're trying to chart.
if line.startswith(date_str):
# The date matched, so find the hours and minutes.
date_field = line.strip().split("|")[0]
try:
format = "%Y/%m/%d %H:%M:%S"
timestamp = datetime.strptime(date_field, format)
hours = timestamp.hour + timestamp.minute/60.0
# Now try to get measurements from this record.
C, RH, hPa = parse_record(f)
if C is not None:
temp_x.append(hours)
# Convert to Fahrenheit
temp_y.append(C/5.0*9.0+32.0)
if RH is not None:
RH_x.append(hours)
RH_y.append(RH)
if hPa is not None:
hPa_x.append(hours)
hPa_y.append(hPa)
except ValueError:
# Date won't parse, so ignore this record.
pass
line = f.readline()
# Make a figure with two vertically stacked sub-plots because the y
# range auto-scaling looks bad for C and %RH combined on one chart.
plt.figure(figsize=(6.4, 4.8), dpi=100)
# plt.figure(1)
def configure_grid_and_xaxis():
# Do this first because it stomps on other stuff
plt.minorticks_on()
# Divide the day into 8 major intervals of 3 hours
plt.xlim(xmin=0, xmax=24)
plt.xticks([3, 6, 9, 12, 15, 18, 21])
plt.gca().xaxis.set_minor_locator(ticker.AutoMinorLocator(3))
# Show major & minor grids
plt.gca().grid(b=True, which="major", linestyle="-", alpha=0.4)
plt.gca().grid(b=True, which="minor", linestyle="-", alpha=0.1)
def set_yminor(n):
# Divide each major tick into n sub-sections
plt.gca().yaxis.set_minor_locator(ticker.AutoMinorLocator(n))
# Temperature
plt.subplot(311)
plt.plot(temp_x, temp_y, "r.", alpha=0.3)
configure_grid_and_xaxis()
plt.ylabel(u"°F")
plt.ylim(ymin=67, ymax=88)
# For y-axis grid lines: major=5F, minor=1F
plt.yticks([70, 75, 80, 85])
set_yminor(5)
# %RH
plt.subplot(312)
plt.plot(RH_x, RH_y, "r.", alpha=0.3)
plt.ylabel("%RH")
plt.ylim(ymin=32, ymax=68)
# For y-axis gridlines: major=10%RH, minor=5%RH
plt.yticks([35, 45, 55, 65])
configure_grid_and_xaxis()
set_yminor(2)
# hPa
plt.subplot(313)
plt.plot(hPa_x, hPa_y, "r.", alpha=0.3)
plt.ylabel("hPa")
plt.xlabel("hours since midnight")
# As I write this, the NWS says their barometer reads "30.26 in (1023.5 mb)"
# which corresponds to about 983.2hPa on my sensor. Until yesterday, the
# highest reading I saw in a month of logging was 979hPa. Tropical Storm
# Cindy landed in Texas a few days ago. Here we had low pressure Thursday
# night (min 963hPa), rising pressure all day Friday (max 975hPa), and
# thunderstorms Saturday morning. After the storms, the pressure went up to
# about 981hPa. Today, Sunday, the pressure kept climbing past my previous
# y-axis upper bound of 983hPa. My highest reading so far was 983.7hPa.
plt.ylim(ymin=957, ymax=987)
plt.xlim(xmin=0, xmax=24)
plt.yticks([960, 970, 980])
configure_grid_and_xaxis()
set_yminor(5)
# Adjust the subplot spacing defaults for better separation
plt.subplots_adjust(left=0.1, bottom=0.11, right=0.98, top=0.98, hspace=0.24)
plt.savefig(out_file, transparent=True)
# Usage: `python charts.py YYYY-MM-DD`.
in_file = os.path.expanduser('~/yawx-data/sensors.log')
if not os.path.isfile(in_file):
print in_file, "not found"
sys.exit(1)
# Try to parse the date that we're supposed to make charts for
try:
day = datetime.strptime(sys.argv[1], "%Y-%m-%d")
except ValueError:
print "Bad date string. Use YYYY-MM-DD"
sys.exit(1)
except IndexError:
print "What date do you want charts for?"
sys.exit(1)
# Using the date in the output file will leave finished charts behind at the
# end of the day if you call this script at regular intervals using the
# current day as the command line argument.
out_file = os.path.expanduser(day.strftime("~/yawx-data/%Y-%m-%d_charts.png"))
# Make the charts
parse_file(in_file, day, out_file)
|
mit
|
bhargav/scikit-learn
|
sklearn/manifold/t_sne.py
|
7
|
34867
|
# Author: Alexander Fabisch -- <[email protected]>
# Author: Christopher Moody <[email protected]>
# Author: Nick Travers <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..neighbors import BallTree
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..utils.fixes import astype
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
    This method is approximately equal to _joint_probabilities. The latter
    is O(N^2), but limiting the joint probability to the nearest neighbors
    reduces this substantially to O(uN).
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
        in a one-dimensional array.
    neighbors : array, shape (n_samples, k)
        Indices of the k nearest neighbors of each sample.
    desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
neighbors = astype(neighbors, np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
m = "All probabilities should be finite"
assert np.all(np.isfinite(conditional_P)), m
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
assert np.all(np.abs(P) <= 1.0)
return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(skip_num_points, n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples,
n_components):
"""t-SNE objective function: the absolute error of the
KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
    neighbors : array (n_samples, K)
        The neighbors array is not actually required to calculate the
        divergence, but is included here to match the signature of the
        gradient function.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
if len(P.shape) == 2:
P = squareform(P)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
return kl_divergence
def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples,
n_components, angle=0.5, skip_num_points=0,
verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors: int64 array, shape (n_samples, K)
Array with element [i, j] giving the index for the jth
closest neighbor to point i.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angles below 0.2 quickly increase the
        computation time, and angles above 0.8 quickly increase the error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = astype(params, np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
neighbors = astype(neighbors, np.int64, copy=False)
if len(P.shape) == 1:
sP = squareform(P).astype(np.float32)
else:
sP = P.astype(np.float32)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(sP, X_embedded, neighbors,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter, objective_error=None,
n_iter_check=1, n_iter_without_progress=50,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
    objective_error : function or callable
        Should return the cost for a given parameter vector; used to
        evaluate the error every n_iter_check steps when the objective
        itself returns a cost of None.
    n_iter_without_progress : int, optional (default: 50)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
if new_error is None:
new_error = objective_error(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
if verbose >= 2:
m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
print(m % (i + 1, error, grad_norm))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if error_diff <= min_error_diff:
if verbose >= 2:
m = "[t-SNE] Iteration %d: error difference %f. Finished."
print(m % (i + 1, error_diff))
break
if new_error is not None:
error = new_error
return p, error, i
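# Hedged sketch, not part of scikit-learn: the objective passed to
# _gradient_descent must return (cost, gradient) for a flat parameter vector.
# A toy quadratic f(p) = 0.5 * ||p||^2 illustrates that contract; the learning
# rate below is chosen for the toy problem, not for t-SNE.
def _demo_gradient_descent():
    def _quadratic(p):
        return 0.5 * np.dot(p, p), p.copy()
    p0 = np.array([3.0, -2.0])
    p_opt, cost, last_iter = _gradient_descent(_quadratic, p0, it=0, n_iter=200,
                                               momentum=0.5, learning_rate=0.1)
    return p_opt, cost, last_iter  # p_opt should end up close to the zero vector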
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
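# Hedged usage sketch, not part of scikit-learn: trustworthiness compares the
# k-nearest-neighbour structure before and after embedding and returns a value
# in [0, 1]; higher means local structure is better preserved. The data below
# is an arbitrary illustration.
def _demo_trustworthiness():
    rng = check_random_state(0)
    X = rng.randn(50, 5)
    X_embedded = X[:, :2]  # crude 2-D projection used only for illustration
    return trustworthiness(X, X_embedded, n_neighbors=5)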
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angles below 0.2 quickly increase the
        computation time, and angles above 0.8 quickly increase the error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> np.set_printoptions(suppress=True)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 0.00017599, 0.00003993],
[ 0.00009891, 0.00021913],
[ 0.00018554, -0.00009357],
[ 0.00009528, -0.00001407]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
if init not in ["pca", "random"] or isinstance(init, np.ndarray):
msg = "'init' must be 'pca', 'random' or a NumPy array"
raise ValueError(msg)
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
self.embedding_ = None
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise using a
dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that when
            method='barnes_hut', X cannot be a sparse array and, if need be,
            will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if not np.all(distances >= 0):
raise ValueError("All distances should be positive, either "
"the metric or precomputed distances given "
"as X are not correct")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
# the number of nearest neighbors to find
k = min(n_samples - 1, int(3. * self.perplexity + 1))
neighbors_nn = None
if self.method == 'barnes_hut':
if self.verbose:
print("[t-SNE] Computing %i nearest neighbors..." % k)
if self.metric == 'precomputed':
# Use the precomputed distances to find
# the k nearest neighbors and their distances
neighbors_nn = np.argsort(distances, axis=1)[:, :k]
else:
# Find the nearest neighbors for every point
bt = BallTree(X)
# LvdM uses 3 * perplexity as the number of neighbors
# And we add one to not count the data point itself
# In the event that we have very small # of points
# set the neighbors to n - 1
distances_nn, neighbors_nn = bt.query(X, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
P = _joint_probabilities_nn(distances, neighbors_nn,
self.perplexity, self.verbose)
else:
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be zero or positive"
assert np.all(P <= 1), ("All probabilities should be less "
"or then equal to one")
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
def _tsne(self, P, degrees_of_freedom, n_samples, random_state,
X_embedded=None, neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
opt_args = {}
opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0,
"learning_rate": self.learning_rate,
"verbose": self.verbose, "n_iter_check": 25,
"kwargs": dict(skip_num_points=skip_num_points)}
if self.method == 'barnes_hut':
m = "Must provide an array of neighbors to use Barnes-Hut"
assert neighbors is not None, m
obj_func = _kl_divergence_bh
objective_error = _kl_divergence_error
sP = squareform(P).astype(np.float32)
neighbors = neighbors.astype(np.int64)
args = [sP, neighbors, degrees_of_freedom, n_samples,
self.n_components]
opt_args['args'] = args
opt_args['min_grad_norm'] = 1e-3
opt_args['n_iter_without_progress'] = 30
# Don't always calculate the cost since that calculation
# can be nearly as expensive as the gradient
opt_args['objective_error'] = objective_error
opt_args['kwargs']['angle'] = self.angle
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
opt_args['args'] = [P, degrees_of_freedom, n_samples,
self.n_components]
opt_args['min_error_diff'] = 0.0
opt_args['min_grad_norm'] = 0.0
# Early exaggeration
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
opt_args['n_iter'] = 100
opt_args['momentum'] = 0.8
opt_args['it'] = it + 1
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] KL divergence after %d iterations with early "
"exaggeration: %f" % (it + 1, kl_divergence))
# Save the final number of iterations
self.n_iter_final = it
# Final optimization
P /= self.early_exaggeration
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
        params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations: %f"
% (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
"""
self.fit_transform(X)
return self
|
bsd-3-clause
|
dsquareindia/scikit-learn
|
sklearn/linear_model/tests/test_theil_sen.py
|
55
|
9939
|
"""
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import (
assert_almost_equal, assert_greater, assert_less, raises,
)
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
with open(os.devnull, 'w') as devnull:
sys.stdout = devnull
sys.stderr = devnull
yield
devnull.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1 / (np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
    # Check for exactly the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
|
bsd-3-clause
|
spatialaudio/sweep
|
lin_sweep_kaiser_window_bandlimited_script5/merge_scripts.py
|
2
|
1696
|
#!/usr/bin/env python3
""" Script to merge scripts"""
import numpy as np
import matplotlib.pyplot as plt
script5 = np.genfromtxt('lin_sweep_kaiser_window_bandlimited_script5.txt')
script5_1 = np.genfromtxt('lin_sweep_kaiser_window_bandlimited_script5_1.txt')
fade_in_list = script5[:, 0]
# Script5
pnr_list = script5[:, 1]
spectrum_distance_list = script5[:, 2]
# Script5_1 (unwindowed deconvolution)
pnr_unwindowed_deconvolution_list = script5_1[:, 1]
spectrum_distance_unwindowed_deconvolution_list = script5_1[:, 2]
plt.plot(fade_in_list, pnr_list, label='Deconvolution: Excitation windowed')
plt.plot(
fade_in_list,
pnr_unwindowed_deconvolution_list,
label='Deconvolution: Excitation unwindowed')
plt.grid()
plt.title('Peak to noise ratio depending on Fade in')
plt.xlabel('Fade in / ms')
plt.ylabel('Peak to noise ratio / dB')
plt.ticklabel_format(useOffset=False)
plt.legend(loc='lower left')
plt.xlim([-10, 1000])
plt.savefig('pnr.png')
plt.close()
NFFT_dirac = 88201
max_measurement = 5974410.59739
plt.plot(fade_in_list, -10 * np.log10(1 / NFFT_dirac *
np.asarray(spectrum_distance_list) / max_measurement), label='Deconvolution: Excitation windowed')
plt.plot(fade_in_list,
-10 * np.log10(1 / NFFT_dirac * np.asarray(spectrum_distance_unwindowed_deconvolution_list) /
max_measurement), label='Deconvolution: Excitation unwindowed')
plt.grid()
plt.title('Spectrum Distance depending on Fade in')
plt.xlabel('Fade in / ms')
plt.ylabel('(Spectrum Distance / max(Spectrum Distance)) / dB')
plt.ticklabel_format(useOffset=False)
plt.legend(loc='lower right')
plt.xlim([-10, 1000])
plt.savefig('spectral_distance.png')
|
mit
|
rupakc/Kaggle-Compendium
|
Bag of Words Meets Bag of Popcorns/Bag of Popcorn.py
|
1
|
3975
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 27 21:28:54 2015
Solution to the Kaggle Competition Bag of Words Meet Bag of Popcorns
Feature Extraction Involves -
1. CountVectorizer
2. TfIdf Vectorizer
Classifiers Included -
SVM (Linear)
Multinomial NB
Perceptron
BernoulliNB
KNN (k=5)
Random Forests
Performance Metrics used during testing -
Accuracy Score
Confusion Matrix
Classification Report (which includes precision,recall and f1-score)
Matthews Correlation Coefficient
Area Under the Curve
@author: Rupak Chakraborty
"""
import pandas as pd
import ClassificationUtils
from sklearn.linear_model import Perceptron
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import metrics
import time
sentiment_data = []
sentiment_labels = []
TRAIN_SIZE = 20000
filename = "Bag of Words Meet Bag of Popcorn (Google Word2Vec)/labeledTrainData.tsv"
data = pd.read_csv(filename,sep="\t")
# Preprocessing the Data
print "Starting Preprocessing Data...."
start = time.time()
for label,review in zip(data["sentiment"],data["review"]):
sentiment_data.append(ClassificationUtils.textCleaningPipeline(review))
sentiment_labels.append(label)
end = time.time()
print "Taken Taken for Data Preprocessing : ",end-start
#Separating the Training and Test Labels
train_labels = sentiment_labels[:TRAIN_SIZE]
test_labels = sentiment_labels[TRAIN_SIZE:]
train_data = sentiment_data[:TRAIN_SIZE]
test_data = sentiment_data[TRAIN_SIZE:]
#Initializing Feature Extractors
count_vec = CountVectorizer()
tfidf = TfidfVectorizer()
#Extracting Training and Test Features
print "Starting Feature Extraction.."
start = time.time()
train_set_bag = count_vec.fit_transform(train_data)
train_set_tfidf = tfidf.fit_transform(train_data)
test_set_bag = count_vec.transform(test_data)
test_set_tfidf = tfidf.transform(test_data)
end = time.time()
print "Time Taken For Feature Extraction : ", end-start
# Initializing Classifiers
perceptron = Perceptron()
mnb = MultinomialNB()
bnb = BernoulliNB()
rf = RandomForestClassifier(n_estimators=91)
knn = KNeighborsClassifier(n_neighbors=5)
# Listing Features and Classifiers
test_feature_list = [test_set_bag,test_set_tfidf]
train_feature_list = [train_set_bag,train_set_tfidf]
feature_name_list = ["Bag of Words","Tf-Idf"]
classifier_name_list = ["Perceptron","Multinomial NB","Bernoulli NB","Random Forest","KNN(K=5)"]
classifier_list = [perceptron,mnb,bnb,rf,knn]
# Iterating for the feature set and the list of classifiers to generate the results
start = time.time()
for train,test,feature_name in zip(train_feature_list,test_feature_list,feature_name_list):
print "---- Results for Feature ------ : ",feature_name
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
classifier.fit(train,train_labels)
predicted_labels = classifier.predict(test)
print "-------------------------------------------------\n"
print "Accuracy for ", classifier_name, ": ", metrics.accuracy_score(test_labels,predicted_labels)
print "Classification Report for ", classifier_name, ":\n", metrics.classification_report(test_labels,predicted_labels)
print "Confusion Matrix for ", classifier_name, ":\n", metrics.confusion_matrix(test_labels,predicted_labels)
print "Matthews Correlation Coefficient for ", classifier_name, ":\n ", metrics.matthews_corrcoef(test_labels,predicted_labels)
print "Area Under Curve for ", classifier_name, ":\n",metrics.roc_auc_score(test_labels,predicted_labels)
print "-------------------------------------------------\n"
end = time.time()
print "Time Taken for Entire Classification : ", end-start
|
mit
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/dask/dataframe/tests/test_optimize_dataframe.py
|
4
|
1688
|
import pytest
from operator import getitem
from toolz import merge
import dask
from dask.dataframe.io import dataframe_from_ctable
import dask.dataframe as dd
import pandas as pd
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
dfs = list(dsk.values())
def test_column_optimizations_with_bcolz_and_rewrite():
bcolz = pytest.importorskip('bcolz')
bc = bcolz.ctable([[1, 2, 3], [10, 20, 30]], names=['a', 'b'])
for cols in [None, 'abc', ['abc']]:
dsk2 = merge(dict((('x', i),
(dataframe_from_ctable, bc, slice(0, 2), cols, {}))
for i in [1, 2, 3]),
dict((('y', i),
(getitem, ('x', i), ['a', 'b']))
for i in [1, 2, 3]))
expected = dict((('y', i), (dataframe_from_ctable,
bc, slice(0, 2), ['a', 'b'], {}))
for i in [1, 2, 3])
result = dd.optimize(dsk2, [('y', i) for i in [1, 2, 3]])
assert result == expected
def test_fuse_ave_width():
df = pd.DataFrame({'x': range(10)})
df = dd.from_pandas(df, npartitions=5)
s = ((df.x + 1) + (df.x + 2))
with dask.config.set(fuse_ave_width=4):
a = s.__dask_optimize__(s.dask, s.__dask_keys__())
b = s.__dask_optimize__(s.dask, s.__dask_keys__())
assert len(a) < len(b)
assert len(a) <= 15
|
gpl-3.0
|
simon-pepin/scikit-learn
|
examples/neighbors/plot_nearest_centroid.py
|
264
|
1804
|
"""
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
davidgbe/scikit-learn
|
examples/linear_model/plot_ridge_path.py
|
254
|
1655
|
"""
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
davek44/Basset
|
src/basset_refine.py
|
1
|
9037
|
#!/usr/bin/env python
from __future__ import print_function
from optparse import OptionParser
import os
import subprocess
import sys
import h5py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.metrics import log_loss
import dna_io
from basset_profile import load_profile, znorm
from basset_sat import get_real_nt
'''
basset_refine.py
Refine a promising sequence to maximize its similarity with a desired activity profile.
'''
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <model_file> <profile_file> <fasta_file>'
parser = OptionParser(usage)
parser.add_option('-a', dest='input_activity_file', help='Optional activity table corresponding to an input FASTA file')
parser.add_option('-e', dest='norm_even', default=False, action='store_true', help='Normalize the weights for the positive and negative datasets to be even [Default: %default]')
parser.add_option('--cuda', dest='cuda', default=False, action='store_true', help='Run on GPGPU [Default: %default]')
parser.add_option('--cudnn', dest='cudnn', default=False, action='store_true', help='Run on GPGPU w/cuDNN [Default: %default]')
parser.add_option('-o', dest='out_dir', default='refine', help='Output directory [Default: %default]')
parser.add_option('-r', dest='norm_preds_file', default=None, help='Prediction means file used to normalize predictions to have equal frequency')
parser.add_option('-s', dest='early_stop', default=.05, type='float', help='Proportion by which the mutation must improve to be accepted [Default: %default]')
parser.add_option('-z', dest='weight_zero', default=1.0, type='float', help='Adjust the weights for the zero samples by this value [Default: %default]')
(options,args) = parser.parse_args()
if len(args) != 3:
parser.error('Must provide Basset model file, activity profile file, and sequence FASTA file')
else:
model_file = args[0]
profile_file = args[1]
input_file = args[2]
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
gpgpu_str = ''
if options.cudnn:
gpgpu_str = '-cudnn'
elif options.cuda:
gpgpu_str = '-cuda'
#################################################################
# prep sequence
#################################################################
# load sequence
seq = ''
for line in open(input_file):
if line[0] == '>':
header = line[1:].rstrip()
else:
seq += line.rstrip()
# convert to one hot coding
seq_1hot = dna_io.dna_one_hot(seq)
seq_1hot = np.reshape(seq_1hot, (1,4,1,len(seq)))
# make initial predictions
seq_preds = predict_seq(model_file, seq_1hot, gpgpu_str, options.out_dir)
num_targets = seq_preds.shape[0]
#################################################################
# prep profile
#################################################################
activity_profile, profile_weights, profile_mask, target_labels = load_profile(profile_file, num_targets, options.norm_even, options.weight_zero)
# normalize predictions
if options.norm_preds_file is not None:
pred_means = np.load(options.norm_preds_file)
# aim for profile weighted average
aim_mean = np.average(pred_means[profile_mask], weights=profile_weights[profile_mask])
# normalize
for ti in range(num_targets):
seq_preds[ti] = znorm(seq_preds[ti], pred_means[ti], aim_mean)
#################################################################
# iteratively refine
#################################################################
nts = 'ACGT'
local_max = False
refined_profile_list = [seq_preds[profile_mask]]
ri = 1
while not local_max:
print('Refinement stage %d' % ri, flush=True)
# write sequence to HDF5
seq_hdf5_file = '%s/seq%d.h5' % (options.out_dir,ri)
seq_hdf5_out = h5py.File(seq_hdf5_file, 'w')
seq_hdf5_out.create_dataset('test_in', data=seq_1hot)
seq_hdf5_out.close()
# perform saturated mutagenesis
sat_hdf5_file = '%s/satmut%d.h5' % (options.out_dir,ri)
torch_cmd = '%s/src/basset_sat_predict.lua %s -rc %s %s %s' % (os.environ['BASSETDIR'],gpgpu_str, model_file, seq_hdf5_file, sat_hdf5_file)
subprocess.call(torch_cmd, shell=True)
# read results into 4 x L x T
sat_hdf5_in = h5py.File(sat_hdf5_file, 'r')
seq_mod_preds = np.array(sat_hdf5_in['seq_mod_preds'])
seq_mod_preds = seq_mod_preds.squeeze()
sat_hdf5_in.close()
# normalize
if options.norm_preds_file is not None:
for ti in range(seq_mod_preds.shape[2]):
seq_mod_preds[:,:,ti] = znorm(seq_mod_preds[:,:,ti], pred_means[ti], aim_mean)
# find sequence prediction
ni, li = get_real_nt(seq)
seq_pred = seq_mod_preds[ni,li,:]
# set to min
seq_dist = log_loss(activity_profile[profile_mask], seq_mod_preds[ni,li,profile_mask], sample_weight=profile_weights[profile_mask])
min_dist = seq_dist
min_entry = (li,ni)
local_max = True
# consider mutated sequences
for li in range(len(seq)):
for ni in range(4):
if seq_1hot[0,ni,0,li] == 0:
# compute distance
mut_dist = log_loss(activity_profile[profile_mask], seq_mod_preds[ni,li,profile_mask], sample_weight=profile_weights[profile_mask])
# compare to min
                    if mut_dist*(1+options.early_stop) < min_dist:
local_max = False
min_dist = mut_dist
min_entry = (li,ni)
# update
if local_max:
print(' Maximized')
else:
# update trace
li, ni = min_entry
print(' Mutate %d %s --> %s' % (li, seq[li], nts[ni]))
print(' Distance decreases from %.3f to %.3f' % (seq_dist, min_dist), flush=True)
# update sequence
seq = seq[:li] + nts[ni] + seq[li+1:]
dna_io.one_hot_set(seq_1hot[0], li, nts[ni])
# save profile
refined_profile_list.append(seq_mod_preds[ni,li,profile_mask])
ri += 1
#################################################################
# finish
#################################################################
refined_profiles = np.array(refined_profile_list)
# print refinement table
table_out = open('%s/final_table.txt' % options.out_dir, 'w')
for ri in range(refined_profiles.shape[0]):
pi = 0
for ti in range(num_targets):
if profile_mask[ti]:
cols = (ri, ti, refined_profiles[ri,pi])
print('%-3d %3d %.3f' % cols, file=table_out)
pi += 1
table_out.close()
# heat map
if len(refined_profile_list) > 1:
plt.figure()
g = sns.clustermap(np.transpose(refined_profiles), col_cluster=False, metric='euclidean', linewidths=0, yticklabels=target_labels[profile_mask], xticklabels=False)
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.savefig('%s/final_heat.pdf' % options.out_dir)
plt.close()
# output sequence
final_fasta_file = '%s/final_seq.fa' % options.out_dir
final_fasta_out = open(final_fasta_file, 'w')
print('>%s\n%s' % (header, seq), file=final_fasta_out)
final_fasta_out.close()
# perform a new saturated mutagenesis
satmut_targets = ','.join([str(ti) for ti in range(len(activity_profile)) if profile_mask[ti]])
if gpgpu_str != '':
gpgpu_str = '-%s' % gpgpu_str
cmd = 'basset_sat.py %s -n 500 -o %s/final_satmut -t %s %s %s' % (gpgpu_str, options.out_dir, satmut_targets, model_file, final_fasta_file)
subprocess.call(cmd, shell=True)
def predict_seq(model_file, seq_1hot, gpgpu_str, out_dir):
''' Make predictions for the single input sequence. '''
# write sequence to HDF5
seq_hdf5_file = '%s/seq0.h5' % out_dir
seq_hdf5_out = h5py.File(seq_hdf5_file, 'w')
seq_hdf5_out.create_dataset('test_in', data=seq_1hot)
seq_hdf5_out.close()
# predict
preds_file = '%s/preds0.txt' % out_dir
torch_cmd = '%s/src/basset_predict.lua -rc %s %s %s %s' % (os.environ['BASSETDIR'],gpgpu_str, model_file, seq_hdf5_file, preds_file)
subprocess.call(torch_cmd, shell=True)
# read predictions
seq_preds = np.loadtxt(preds_file)
return seq_preds
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
#pdb.runcall(main)
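# Hedged usage sketch (not part of the original script; every path below is a
# placeholder, not a file shipped with Basset). The refinement loop above is
# driven from the command line, for example:
#
#   export BASSETDIR=/path/to/Basset
#   python basset_refine.py --cudnn -o refine_out -r pred_means.npy \
#       model_best.th activity_profile.txt candidate_seq.fa
#
# which iteratively mutates candidate_seq.fa toward activity_profile.txt and
# writes final_seq.fa, final_table.txt and final_heat.pdf into refine_out/.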
|
mit
|
yunfeilu/scikit-learn
|
sklearn/utils/random.py
|
234
|
10510
|
# Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
# Normalize probabilites for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
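# Hedged usage sketch (illustration only, appended for clarity; the class sets
# and probabilities below are assumptions, not values from this module).
# random_choice_csc draws one column per entry of `classes`:
#
#   from sklearn.utils.random import random_choice_csc
#   demo = random_choice_csc(
#       n_samples=5,
#       classes=[np.array([0, 1]), np.array([0, 2, 3])],
#       class_probability=[np.array([0.5, 0.5]),
#                          np.array([0.2, 0.4, 0.4])],
#       random_state=0)
#   demo.toarray()   # dense (5, 2) view; zeros are not stored explicitly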
|
bsd-3-clause
|
mshakya/PyPiReT
|
piret/functions/function.py
|
1
|
5916
|
#! /usr/bin/env python
"""Check design."""
import sys
import gffutils
import json
from Bio.Seq import Seq
from Bio import SeqIO
import Bio
import os
import luigi
import pandas as pd
from luigi.interface import build
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
import subprocess
from luigi.util import requires
from piret.miscs import RefFile
dir_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(dir_path, '..', '..'))
emapper_path = os.path.join(lib_path, "thirdparty", "eggnog-mapper",
"emapper.py")
emapper_dir = os.path.join(lib_path, "thirdparty", "eggnog-mapper", "data")
sys.path.append(emapper_path)
os.environ["PATH"] += os.pathsep + emapper_path
class GetAAs(luigi.Task):
"""Get amino acid sequences."""
gff_file = luigi.Parameter()
fasta_file = luigi.Parameter()
workdir = luigi.Parameter()
kingdom = luigi.Parameter()
ave_map = luigi.FloatParameter()
def requires(self):
"""Check if those two files are present."""
        return [RefFile(f) for f in [self.gff_file, self.fasta_file]]
def gff2faa(self, gff_file, fasta):
"""reads in gff file and fasta to output proteome."""
# get the list of CDS that past the threshold.
imp_cds = self.get_imp_cds(self.ave_map)
# make directories for storing gff database and amino acids
if os.path.exists(os.path.join(self.workdir, "processes", "databases",
self.kingdom)) is False:
os.makedirs(os.path.join(self.workdir, "processes",
"databases", self.kingdom))
db_out = os.path.join(self.workdir, "processes", "databases",
self.kingdom, "piret.db")
with open(os.path.join(self.workdir, "processes", "databases", self.kingdom, "aas.faa"), "w") as f:
if os.path.exists(db_out) is False:
# create db if not already present
db = gffutils.create_db(gff_file, dbfn=db_out, force=True,
keep_order=True,
merge_strategy="create_unique")
else:
# read db if its already present
db = gffutils.FeatureDB(db_out, keep_order=True)
for feat_obj in db.features_of_type("CDS"):
if feat_obj.id in imp_cds.Geneid.tolist():
nt_seqs = feat_obj.sequence(self.fasta_file)
prot_seqs = self.translate(nt_seqs, "CDS")
try:
desc = feat_obj.attributes['product'][0]
except:
desc = "No annotation"
record = SeqRecord(Seq(prot_seqs, IUPAC.protein),
id=feat_obj.id,
description=desc)
SeqIO.write(record, f, "fasta")
def translate(self, nucleotide, type):
"""Takes in a string of nucleotides and translate to AA."""
if type == "CDS":
aa = Bio.Seq.translate(nucleotide, cds=False)
elif type == "exon":
aa = Bio.Seq.translate(nucleotide, cds=False)
else:
aa = "not translated"
return aa
def get_imp_cds(self, ave_map):
"""Read in the read count table
and filter out cds that do not make the threshold
of average reads as shown in ave_map"""
cds_table = os.path.join(self.workdir, "processes", "featureCounts",
self.kingdom, "CDS_count.tsv")
cds_df = pd.read_csv(cds_table, sep="\t", comment="#")
cds_df["mean"] = cds_df.iloc[:, 6:].mean(axis=1)
cds_df = cds_df[cds_df["mean"] > ave_map]
return cds_df
def run(self):
"""Create fasta file."""
self.gff2faa(self.gff_file, self.fasta_file)
def output(self):
"""Expected amino acid output."""
aa_file = os.path.join(self.workdir, "processes", "databases",
self.kingdom, "aas.faa")
return luigi.LocalTarget(aa_file)
@requires(GetAAs)
class RunEmapper(luigi.ExternalTask):
""" Run emapper.
Gets KEGG ids, EC #s, GO#s"""
query_coverage = luigi.Parameter()
subject_coverage = luigi.Parameter()
emapper_dir = luigi.Parameter()
def run_emapper(self):
"""Using the amino acid fasta file, run emapper."""
aa_file = os.path.join(self.workdir, "processes", "databases",
self.kingdom , "aas.faa")
egg_dir = os.path.join(self.workdir, "processes", "emapper", self.kingdom, "emapper")
if os.path.exists(egg_dir) is False:
os.makedirs(egg_dir)
emap = ["python", emapper_path, "-i",
aa_file, "-o", egg_dir, "--data_dir", self.emapper_dir,
"--dbtype", "seqdb", "-m", "diamond", "--target_orthologs", "one2one",
"--query-cover", self.query_coverage,
"--subject-cover", self.subject_coverage,
"--temp_dir", egg_dir]
print(emap)
subprocess.call(emap)
def translate(self, nucleotide, type):
"""Takes in a string of nucleotides and translate to AA."""
if type == "CDS":
aa = Bio.Seq.translate(nucleotide, cds=False)
elif type == "exon":
aa = Bio.Seq.translate(nucleotide, cds=False)
else:
aa = "not translated"
return aa
def run(self):
""" Create fasta file."""
self.run_emapper()
def output(self):
"""Expected output JSON."""
jfile = os.path.join(self.workdir, "processes", "emapper",
self.kingdom, "emapper.emapper.annotations")
return luigi.LocalTarget((jfile))
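# Hedged usage sketch (illustration only; the file names and parameter values
# are placeholders, not defaults of this module). Both classes are ordinary
# luigi tasks, so a pipeline would typically schedule RunEmapper (which pulls
# in GetAAs via @requires) with luigi's build helper:
#
#   build([RunEmapper(gff_file="sample.gff", fasta_file="sample.fa",
#                     workdir="workdir", kingdom="prokarya", ave_map=5.0,
#                     query_coverage="50", subject_coverage="50",
#                     emapper_dir=emapper_dir)],
#         local_scheduler=True)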
|
bsd-3-clause
|
wagavulin/arrow
|
python/pyarrow/feather.py
|
4
|
3750
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from distutils.version import LooseVersion
import os
import six
import pandas as pd
from pyarrow.compat import pdapi
from pyarrow.lib import FeatherError # noqa
from pyarrow.lib import RecordBatch, Table
import pyarrow.lib as ext
try:
infer_dtype = pdapi.infer_dtype
except AttributeError:
infer_dtype = pd.lib.infer_dtype
if LooseVersion(pd.__version__) < '0.17.0':
raise ImportError("feather requires pandas >= 0.17.0")
class FeatherReader(ext.FeatherReader):
def __init__(self, source):
self.source = source
self.open(source)
def read(self, columns=None, nthreads=1):
if columns is not None:
column_set = set(columns)
else:
column_set = None
columns = []
names = []
for i in range(self.num_columns):
name = self.get_column_name(i)
if column_set is None or name in column_set:
col = self.get_column(i)
columns.append(col)
names.append(name)
table = Table.from_arrays(columns, names=names)
return table.to_pandas(nthreads=nthreads)
class FeatherWriter(object):
def __init__(self, dest):
self.dest = dest
self.writer = ext.FeatherWriter()
self.writer.open(dest)
def write(self, df):
if isinstance(df, pd.SparseDataFrame):
df = df.to_dense()
if not df.columns.is_unique:
raise ValueError("cannot serialize duplicate column names")
# TODO(wesm): Remove this length check, see ARROW-1732
if len(df.columns) > 0:
batch = RecordBatch.from_pandas(df, preserve_index=False)
for i, name in enumerate(batch.schema.names):
col = batch[i]
self.writer.write_array(name, col)
self.writer.close()
def write_feather(df, dest):
"""
Write a pandas.DataFrame to Feather format
Parameters
----------
df : pandas.DataFrame
dest : string
Local file path
"""
writer = FeatherWriter(dest)
try:
writer.write(df)
except Exception:
# Try to make sure the resource is closed
import gc
writer = None
gc.collect()
if isinstance(dest, six.string_types):
try:
os.remove(dest)
except os.error:
pass
raise
def read_feather(source, columns=None, nthreads=1):
"""
Read a pandas.DataFrame from Feather format
Parameters
----------
source : string file path, or file-like object
columns : sequence, optional
Only read a specific set of columns. If not provided, all columns are
read
nthreads : int, default 1
Number of CPU threads to use when reading to pandas.DataFrame
Returns
-------
df : pandas.DataFrame
"""
reader = FeatherReader(source)
return reader.read(columns=columns, nthreads=nthreads)
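# Hedged usage sketch (not part of the original module): a minimal round trip
# through the two helpers above. The file name and the DataFrame contents are
# assumptions made for demonstration only.
if __name__ == '__main__':
    demo_df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
    write_feather(demo_df, 'demo.feather')      # serialize to Feather
    roundtrip = read_feather('demo.feather')    # read the table back
    print(roundtrip.equals(demo_df))            # expected: True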
|
apache-2.0
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/pandas/tests/io/parser/na_values.py
|
6
|
10526
|
# -*- coding: utf-8 -*-
"""
Tests that NA values are properly handled during
parsing for all of the parsers defined in parsers.py
"""
import numpy as np
from numpy import nan
import pandas.io.parsers as parsers
import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, range
class NAvaluesTests(object):
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = np.array([['foo', 'bar'], [nan, 'baz'], [nan, nan]],
dtype=np.object_)
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
def test_non_string_na_values(self):
# see gh-3611: with an odd float format, we can't match
# the string '999.0' exactly but still need float matching
nice = """A,B
-999,1.2
2,-999
3,4.5
"""
ugly = """A,B
-999,1.200
2,-999.000
3,4.500
"""
na_values_param = [['-999.0', '-999'],
[-999, -999.0],
[-999.0, -999],
['-999.0'], ['-999'],
[-999.0], [-999]]
expected = DataFrame([[np.nan, 1.2], [2.0, np.nan],
[3.0, 4.5]], columns=['A', 'B'])
for data in (nice, ugly):
for na_values in na_values_param:
out = self.read_csv(StringIO(data), na_values=na_values)
tm.assert_frame_equal(out, expected)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
assert _NA_VALUES == parsers._NA_VALUES
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = np.array([[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]])
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_numpy_array_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_numpy_array_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_numpy_array_equal(df3.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
def test_na_values_keep_default(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# see gh-4318: passing na_values=None and
# keep_default_na=False yields 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_na_values_na_filter_override(self):
data = """\
A,B
1,A
nan,B
3,C
"""
expected = DataFrame([[1, 'A'], [np.nan, np.nan], [3, 'C']],
columns=['A', 'B'])
out = self.read_csv(StringIO(data), na_values=['B'], na_filter=True)
tm.assert_frame_equal(out, expected)
expected = DataFrame([['1', 'A'], ['nan', 'B'], ['3', 'C']],
columns=['A', 'B'])
out = self.read_csv(StringIO(data), na_values=['B'], na_filter=False)
tm.assert_frame_equal(out, expected)
def test_na_trailing_columns(self):
data = """Date,Currenncy,Symbol,Type,Units,UnitPrice,Cost,Tax
2012-03-14,USD,AAPL,BUY,1000
2012-05-12,USD,SBUX,SELL,500"""
result = self.read_csv(StringIO(data))
assert result['Date'][1] == '2012-05-12'
assert result['UnitPrice'].isnull().all()
def test_na_values_scalar(self):
# see gh-12224
names = ['a', 'b']
data = '1,2\n2,1'
expected = DataFrame([[np.nan, 2.0], [2.0, np.nan]],
columns=names)
out = self.read_csv(StringIO(data), names=names, na_values=1)
tm.assert_frame_equal(out, expected)
expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]],
columns=names)
out = self.read_csv(StringIO(data), names=names,
na_values={'a': 2, 'b': 1})
tm.assert_frame_equal(out, expected)
def test_na_values_dict_aliasing(self):
na_values = {'a': 2, 'b': 1}
na_values_copy = na_values.copy()
names = ['a', 'b']
data = '1,2\n2,1'
expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=names)
out = self.read_csv(StringIO(data), names=names, na_values=na_values)
tm.assert_frame_equal(out, expected)
tm.assert_dict_equal(na_values, na_values_copy)
def test_na_values_dict_col_index(self):
# see gh-14203
data = 'a\nfoo\n1'
na_values = {0: 'foo'}
out = self.read_csv(StringIO(data), na_values=na_values)
expected = DataFrame({'a': [np.nan, 1]})
tm.assert_frame_equal(out, expected)
def test_na_values_uint64(self):
# see gh-14983
na_values = [2**63]
data = str(2**63) + '\n' + str(2**63 + 1)
expected = DataFrame([str(2**63), str(2**63 + 1)])
out = self.read_csv(StringIO(data), header=None, na_values=na_values)
tm.assert_frame_equal(out, expected)
data = str(2**63) + ',1' + '\n,2'
expected = DataFrame([[str(2**63), 1], ['', 2]])
out = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(out, expected)
def test_empty_na_values_no_default_with_index(self):
# see gh-15835
data = "a,1\nb,2"
expected = DataFrame({'1': [2]}, index=Index(["b"], name="a"))
out = self.read_csv(StringIO(data), keep_default_na=False, index_col=0)
tm.assert_frame_equal(out, expected)
|
mit
|
angelosharpe/FXT
|
FXT/src/models/mock_model.py
|
1
|
1168
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
from src.model import Model
class MockModel(Model):
"""
Model class
"""
def train(self):
pass
def trade(self, broker):
# actualize open trades
self.trades = broker.get_open_trades()
for tick in broker.get_tick_data(self.instrument):
self.buffer.append(tick)
# create pandas dataframe and resample data to 5s - example :-)
df = pd.DataFrame(list(self.buffer), columns=['datetime', 'buy', 'sell'])
df.set_index('datetime', inplace=True)
resampled = df.resample('5Min', how={'buy':'ohlc', 'sell':'ohlc'})
print(resampled)
## should we cose some trade?
#for trade in self.trades:
# self.close_position(broker, trade)
## should we open some new trade?
# do the magic and return 0/vlume/-volume
# volume = xxx
#self.open_position(broker, self.instrument, -1)
#broker.get_account_information()
#print(broker)
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
gpl-2.0
|
ClimateImpactLab/pipelines
|
pipelines/pipelines.py
|
1
|
8498
|
import itertools
import xarray as xr
import pandas as pd
import numpy as np
import shutil
import os
import tempfile
import dill
import json
import inspect
from toolz import memoize
from contextlib import contextmanager
import logging
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('uploader')
logger.setLevel('INFO')
@contextmanager
def temporary_dir():
d = tempfile.mkdtemp()
try:
yield d
finally:
shutil.rmtree(d)
@memoize
def create_dummy_data(tmp, variable):
tmp_path_in = os.path.join(tmp, 'sample_in.nc')
time = pd.date_range('1/1/1981', periods=4, freq='3M')
lats = np.arange(-89.875, 90, 0.25)
lons = np.arange(-179.875, 180, 0.25)
ds = xr.Dataset({
variable: xr.DataArray(
np.random.random((len(time), len(lats), len(lons))),
dims=('time', 'lat', 'lon'),
coords={
'time': time,
'lat': lats,
'lon': lons})
})
ds.to_netcdf(tmp_path_in)
return tmp_path_in
class JobRunner(object):
'''
Generalized job dispatch class
'''
def __init__(
self,
name,
func,
iteration_components,
read_patterns,
write_pattern,
workers=1,
metadata={}):
self._name = name
self._runner = func()
self._iteration_components = iteration_components
self._read_patterns = read_patterns
self._write_pattern = write_pattern
self._njobs = reduce(
lambda x, y: x*y, map(len, self._iteration_components))
self._metadata = metadata
def _get_jobs(self):
for i, job_components in enumerate(
itertools.product(*self._iteration_components)):
job = {}
for job_component in job_components:
job.update(job_component)
yield job
def _build_metadata(self, job):
metadata = {k: v for k, v in self._metadata.items()}
metadata.update({k: str(v) for k, v in job.items()})
return metadata
def run(self):
'''
Invoke a full run for the specified job set
'''
for i, job in enumerate(self._get_jobs()):
logger.info('beginning job {} of {}'.format(i, self._njobs))
try:
metadata = self._build_metadata(job)
kwargs = {k: v for k, v in job.items()}
kwargs.update(
{k: v.format(**metadata) for k, v in self._read_patterns.items()})
kwargs['write_file'] = self._write_pattern.format(**metadata)
kwargs['metadata'] = metadata
self._runner.run(**kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except Exception, e:
logging.error(
'Error encountered in job {} of {}:\nJob spec:\n{}\n'
.format(i, self._njobs, job),
exc_info=e)
def run_slurm(self):
for i, job in enumerate(self._get_jobs()):
run_flags = [
'--job-name={}_{}'.format(self._name, i),
'--partition=savio2_bigmem',
'--account=co_laika',
'--qos=laika_bigmem2_normal',
'--nodes=1',
'--ntasks-per-node=5',
'--mem-per-cpu=8000',
'--cpus-per-task=2',
'--time=72:00:00']
metadata = self._build_metadata(job)
kwargs = {k: v for k, v in job.items()}
kwargs.update(
{k: v.format(**metadata)
for k, v in self._read_patterns.items()})
kwargs['write_file'] = self._write_pattern.format(**metadata)
kwargs['metadata'] = metadata
# logger.info('beginning job {} of {}'.format(i, self._njobs))
call = ("{header}\n\npython -m {module} {func} '{job}'".format(
header='#!/bin/bash',
module=self._runner.__module__,
func=self._runner.__name__,
job=json.dumps(kwargs)))
with open('job.sh', 'w+') as f:
f.write(call)
os.system('sbatch {flags} job.sh'.format(flags=' '.join(run_flags)))
os.remove('job.sh')
def test(self):
'''
Test the specified run using dummy data
'''
i = None
have_attempted = {}
with temporary_dir() as tmp:
for i, job in enumerate(self._get_jobs()):
assert len(job) > 0, 'No job specification in job {}'.format(i)
# Ensure paths are specified correctly
# Don't check for presence, but check pattern
for pname, patt in self._read_patterns.items():
assert len(patt.format(**job)) > 0
assert len(self._write_pattern.format(**job)) > 0
test_this_job = False
for k, v in job.items():
if not k in have_attempted:
have_attempted[k] = []
if not v in have_attempted[k]:
test_this_job = True
have_attempted[k].append(v)
if not test_this_job:
continue
# ideally, test to make sure all the inputs exist on datafs
# check_datafs(job)
kwargs = {k: v for k, v in job.items()}
kwargs['write_file'] = os.path.join(tmp, 'sample_out.nc')
kwargs['metadata'] = self._build_metadata(job)
if i > 0:
dummy = self._runner.create_dummy_data_small(tmp, job['variable'])
kwargs.update(dummy)
res = self._runner.run_test_small(**kwargs)
else:
dummy = self._runner.create_dummy_data(tmp, job['variable'])
kwargs.update(dummy)
res = self._runner.run_test(**kwargs)
assert os.path.isfile(kwargs['write_file']), "No file created"
os.remove(kwargs['write_file'])
if i is None:
raise ValueError('No jobs specified')
class JobCreator(object):
def __init__(self, name, func):
self._name = name
self._job_function_getter = func
def __call__(self, *args, **kwargs):
kwargs.update({'name': self._name})
return self._job_function_getter(*args, **kwargs)
def register(name):
def decorator(func):
return JobCreator(name, func)
return decorator
def read_patterns(*patt, **patterns):
if len(patt) == 1 and 'read_pattern' not in patterns:
patterns['read_file'] = patt[0]
elif len(patt) > 1:
raise ValueError('more than one read pattern must use kwargs')
def decorator(func):
def inner(*args, **kwargs):
kwargs.update({'read_patterns': patterns})
return func(*args, **kwargs)
return inner
return decorator
def write_pattern(patt):
def decorator(func):
def inner(*args, **kwargs):
kwargs.update(dict(write_pattern=patt))
return func(*args, **kwargs)
return inner
return decorator
def iterate(*iters):
def decorator(func):
def inner(*args, **kwargs):
kwargs.update(dict(iteration_components=iters))
return func(*args, **kwargs)
return inner
return decorator
def add_metadata(metadata):
def decorator(func):
def inner(*args, **kwargs):
kwargs.update(dict(metadata=metadata))
return func(*args, **kwargs)
return inner
return decorator
def run(workers=1):
def decorator(func):
def inner(*args, **kwargs):
kwargs.update(dict(func=func, workers=workers))
return JobRunner(*args, **kwargs)
return inner
return decorator
def prep_func(func):
funcname = '.'.join([inspect.getmodule(func).__name__, func.__name__])
if not os.path.isdir('pipes'):
os.makedirs('pipes')
fp = 'pipes/{}'.format(funcname)
with open(fp, 'wb+') as f:
pickled = dill.dump(func, f)
return funcname
def load_func(func):
fp = 'pipes/{}'.format(func)
with open(fp, 'rb') as f:
return dill.load(f)
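# Hedged, self-contained demonstration (not part of the original pipeline
# definitions): it shows how the decorators above compose. `register` wraps
# the decorated getter in a JobCreator, and the stacked read/write/iterate/
# metadata/run decorators feed keyword arguments into JobRunner when the
# creator is called. The runner class, job name, patterns, variables and
# years below are all assumptions made for illustration only.
if __name__ == '__main__':
    class _EchoRunner(object):
        """Toy runner that only logs what it would read and write."""
        def run(self, **kwargs):
            logger.info('would process %s -> %s',
                        kwargs.get('read_file'), kwargs.get('write_file'))

    @register('echo_example')
    @read_patterns('/data/{variable}/{year}.nc')
    @write_pattern('/output/{variable}/{year}.nc')
    @iterate([{'variable': 'tas'}, {'variable': 'pr'}],
             [{'year': 1990}, {'year': 1991}])
    @add_metadata({'project': 'demo'})
    @run(workers=1)
    def echo_example():
        return _EchoRunner()

    echo_example().run()   # dispatches the 4 variable/year jobs locally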
|
mit
|