repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Vimos/scikit-learn | sklearn/__init__.py | 28 | 3073 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.19.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It is used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'exceptions', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'learning_curve', 'linear_model', 'manifold', 'metrics',
'mixture', 'model_selection', 'multiclass', 'multioutput',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
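# Usage note (added comment, not part of the upstream file): the fixture above
# reads the seed from the environment, so a test run can be made reproducible
# by exporting it first, e.g.
#
#     SKLEARN_SEED=42 nosetests sklearn
#
# When the variable is unset, a random seed is drawn and printed as above so
# that a failing run can still be reproduced.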
| bsd-3-clause |
Vishruit/DDP_models | code/imports_lib.py | 1 | 1363 | # Matplotlib imports
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Reference counted: Helps with segmentation fault
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
# Keras Imports
import keras
from keras import initializers
import keras.backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, Callback, ProgbarLogger, ReduceLROnPlateau
from keras.callbacks import LambdaCallback, CSVLogger
from keras.layers import Input, Dense, ZeroPadding2D, Convolution2D, MaxPooling2D, BatchNormalization, Flatten, Activation, Dropout
from keras.layers import Reshape, Conv2D, UpSampling3D, Conv3D, MaxPooling3D
from keras.layers.core import Lambda
from keras.metrics import categorical_accuracy, binary_accuracy
from keras.models import Model, load_model, model_from_json
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras.utils import to_categorical
from keras.utils.io_utils import HDF5Matrix
from keras.utils.np_utils import normalize
from keras.utils.vis_utils import plot_model
from keras_contrib.layers import Deconvolution3D
# Miscellaneous
import numpy as np
import tensorflow as tf
import cPickle, gzip, pickle
import argparse, h5py
import os, time, sys
| gpl-3.0 |
jstoxrocky/statsmodels | statsmodels/examples/ex_kernel_regression_sigtest.py | 34 | 3177 | # -*- coding: utf-8 -*-
"""Kernel Regression and Significance Test
Warning: SLOW, 11 minutes on my computer
Created on Thu Jan 03 20:20:47 2013
Author: Josef Perktold
results - this version
----------------------
>>> exec(open('ex_kernel_regression_censored1.py').read())
bw
[ 0.3987821 0.50933458]
[0.39878209999999997, 0.50933457999999998]
sig_test - default
Not Significant
pvalue
0.11
test statistic 0.000434305313291
bootstrap critical values
[ 0.00043875 0.00046808 0.0005064 0.00054151]
sig_test - pivot=True, nboot=200, nested_res=50
pvalue
0.01
test statistic 6.17877171579
bootstrap critical values
[ 5.5658345 5.74761076 5.87386858 6.46012041]
times: 8.34599995613 20.6909999847 666.373999834
"""
from __future__ import print_function
import time
import numpy as np
import statsmodels.nonparametric.api as nparam
import statsmodels.nonparametric.kernel_regression as smkr
if __name__ == '__main__':
t0 = time.time()
#example from test file
nobs = 200
np.random.seed(1234)
C1 = np.random.normal(size=(nobs, ))
C2 = np.random.normal(2, 1, size=(nobs, ))
noise = np.random.normal(size=(nobs, ))
Y = 0.3 + 1.2 * C1 - 0.9 * C2 + noise
#self.write2file('RegData.csv', (Y, C1, C2))
#CODE TO PRODUCE BANDWIDTH ESTIMATION IN R
#library(np)
#data <- read.csv('RegData.csv', header=FALSE)
#bw <- npregbw(formula=data$V1 ~ data$V2 + data$V3,
# bwmethod='cv.aic', regtype='lc')
model = nparam.KernelReg(endog=[Y], exog=[C1, C2],
reg_type='lc', var_type='cc', bw='aic')
mean, marg = model.fit()
#R_bw = [0.4017893, 0.4943397] # Bandwidth obtained in R
bw_expected = [0.3987821, 0.50933458]
#npt.assert_allclose(model.bw, bw_expected, rtol=1e-3)
print('bw')
print(model.bw)
print(bw_expected)
print('\nsig_test - default')
print(model.sig_test([1], nboot=100))
t1 = time.time()
res0 = smkr.TestRegCoefC(model, [1])
print('pvalue')
print((res0.t_dist >= res0.test_stat).mean())
print('test statistic', res0.test_stat)
print('bootstrap critical values')
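# (Comment added for clarity, not in the original script.) The values printed
# below are empirical upper-tail quantiles (90%, 95%, 97.5%, 99%) of the
# bootstrapped test-statistic distribution; the test rejects at a given level
# when test_stat exceeds the corresponding critical value.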
probs = np.array([0.9, 0.95, 0.975, 0.99])
bsort0 = np.sort(res0.t_dist)
nrep0 = len(bsort0)
print(bsort0[(probs * nrep0).astype(int)])
t2 = time.time()
print('\nsig_test - pivot=True, nboot=200, nested_res=50')
res1 = smkr.TestRegCoefC(model, [1], pivot=True, nboot=200, nested_res=50)
print('pvalue')
print((res1.t_dist >= res1.test_stat).mean())
print('test statistic', res1.test_stat)
print('bootstrap critical values')
probs = np.array([0.9, 0.95, 0.975, 0.99])
bsort1 = np.sort(res1.t_dist)
nrep1 = len(bsort1)
print(bsort1[(probs * nrep1).astype(int)])
t3 = time.time()
print('times:', t1-t0, t2-t1, t3-t2)
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(1,1,1)
# ax.plot(x, y, 'o', alpha=0.5)
# ax.plot(x, y_cens, 'o', alpha=0.5)
# ax.plot(x, y_true, lw=2, label='DGP mean')
# ax.plot(x, sm_mean, lw=2, label='model 0 mean')
# ax.plot(x, mean2, lw=2, label='model 2 mean')
# ax.legend()
#
# plt.show()
| bsd-3-clause |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/mpl_toolkits/mplot3d/art3d.py | 10 | 25411 | # art3d.py, original mplot3d version by John Porter
# Parts rewritten by Reinier Heeres <[email protected]>
# Minor additions by Ben Axelrod <[email protected]>
'''
Module containing 3D artist code and functions to convert 2D
artists into 3D versions which can be added to an Axes3D.
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from matplotlib import lines, text as mtext, path as mpath, colors as mcolors
from matplotlib import artist
from matplotlib.collections import Collection, LineCollection, \
PolyCollection, PatchCollection, PathCollection
from matplotlib.cm import ScalarMappable
from matplotlib.patches import Patch
from matplotlib.colors import Normalize
from matplotlib.cbook import iterable
import warnings
import numpy as np
import math
from . import proj3d
def norm_angle(a):
"""Return angle between -180 and +180"""
a = (a + 360) % 360
if a > 180:
a = a - 360
return a
def norm_text_angle(a):
"""Return angle between -90 and +90"""
a = (a + 180) % 180
if a > 90:
a = a - 180
return a
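# Illustrative examples (added comments, not upstream code):
#   norm_angle(270)      -> -90   (wraps 270 degrees into (-180, 180])
#   norm_text_angle(135) -> -45   (keeps text within +/-90 degrees of upright)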
def get_dir_vector(zdir):
if zdir == 'x':
return np.array((1, 0, 0))
elif zdir == 'y':
return np.array((0, 1, 0))
elif zdir == 'z':
return np.array((0, 0, 1))
elif zdir is None:
return np.array((0, 0, 0))
elif iterable(zdir) and len(zdir) == 3:
return zdir
else:
raise ValueError("'x', 'y', 'z', None or vector of length 3 expected")
class Text3D(mtext.Text):
'''
Text object with 3D position and (in the future) direction.
'''
def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):
'''
*x*, *y*, *z* Position of text
*text* Text string to display
*zdir* Direction of text
Keyword arguments are passed onto :func:`~matplotlib.text.Text`.
'''
mtext.Text.__init__(self, x, y, text, **kwargs)
self.set_3d_properties(z, zdir)
def set_3d_properties(self, z=0, zdir='z'):
x, y = self.get_position()
self._position3d = np.array((x, y, z))
self._dir_vec = get_dir_vector(zdir)
self.stale = True
def draw(self, renderer):
proj = proj3d.proj_trans_points([self._position3d, \
self._position3d + self._dir_vec], renderer.M)
dx = proj[0][1] - proj[0][0]
dy = proj[1][1] - proj[1][0]
if dx == 0. and dy == 0.:
# atan2 raises ValueError: math domain error on 0,0
angle = 0.
else:
angle = math.degrees(math.atan2(dy, dx))
self.set_position((proj[0][0], proj[1][0]))
self.set_rotation(norm_text_angle(angle))
mtext.Text.draw(self, renderer)
self.stale = False
def text_2d_to_3d(obj, z=0, zdir='z'):
"""Convert a Text to a Text3D object."""
obj.__class__ = Text3D
obj.set_3d_properties(z, zdir)
class Line3D(lines.Line2D):
'''
3D line object.
'''
def __init__(self, xs, ys, zs, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.
'''
lines.Line2D.__init__(self, [], [], *args, **kwargs)
self._verts3d = xs, ys, zs
def set_3d_properties(self, zs=0, zdir='z'):
xs = self.get_xdata()
ys = self.get_ydata()
try:
# If *zs* is a list or array, then this will fail and
# just proceed to juggle_axes().
zs = float(zs)
zs = [zs for x in xs]
except TypeError:
pass
self._verts3d = juggle_axes(xs, ys, zs, zdir)
self.stale = True
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_data(xs, ys)
lines.Line2D.draw(self, renderer)
self.stale = False
def line_2d_to_3d(line, zs=0, zdir='z'):
'''
Convert a 2D line to 3D.
'''
line.__class__ = Line3D
line.set_3d_properties(zs, zdir)
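# Sketch of the in-place promotion pattern used throughout this module (added
# comment, not upstream code): a finished 2D artist is converted by swapping
# its class and attaching the extra 3D state, e.g.
#
#     from matplotlib.lines import Line2D
#     ln = Line2D([0, 1], [0, 1])
#     line_2d_to_3d(ln, zs=[0.0, 1.0], zdir='z')   # ln is now a Line3D
#
# Axes3D.plot applies the same promotion to the lines produced by the 2D
# plotting machinery.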
def path_to_3d_segment(path, zs=0, zdir='z'):
'''Convert a path to a 3D segment.'''
if not iterable(zs):
zs = np.ones(len(path)) * zs
seg = []
pathsegs = path.iter_segments(simplify=False, curves=False)
for (((x, y), code), z) in zip(pathsegs, zs):
seg.append((x, y, z))
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d
def paths_to_3d_segments(paths, zs=0, zdir='z'):
'''
Convert paths from a collection object to 3D segments.
'''
if not iterable(zs):
zs = np.ones(len(paths)) * zs
segments = []
for path, pathz in zip(paths, zs):
segments.append(path_to_3d_segment(path, pathz, zdir))
return segments
def path_to_3d_segment_with_codes(path, zs=0, zdir='z'):
'''Convert a path to a 3D segment with path codes.'''
if not iterable(zs):
zs = np.ones(len(path)) * zs
seg = []
codes = []
pathsegs = path.iter_segments(simplify=False, curves=False)
for (((x, y), code), z) in zip(pathsegs, zs):
seg.append((x, y, z))
codes.append(code)
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d, codes
def paths_to_3d_segments_with_codes(paths, zs=0, zdir='z'):
'''
Convert paths from a collection object to 3D segments with path codes.
'''
if not iterable(zs):
zs = np.ones(len(paths)) * zs
segments = []
codes_list = []
for path, pathz in zip(paths, zs):
segs, codes = path_to_3d_segment_with_codes(path, pathz, zdir)
segments.append(segs)
codes_list.append(codes)
return segments, codes_list
class Line3DCollection(LineCollection):
'''
A collection of 3D lines.
'''
def __init__(self, segments, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.collections.LineCollection`.
'''
LineCollection.__init__(self, segments, *args, **kwargs)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_segments(self, segments):
'''
Set 3D segments
'''
self._segments3d = np.asanyarray(segments)
LineCollection.set_segments(self, [])
def do_3d_projection(self, renderer):
'''
Project the points according to renderer matrix.
'''
xyslist = [
proj3d.proj_trans_points(points, renderer.M) for points in
self._segments3d]
segments_2d = [list(zip(xs, ys)) for (xs, ys, zs) in xyslist]
LineCollection.set_segments(self, segments_2d)
# FIXME
minz = 1e9
for (xs, ys, zs) in xyslist:
minz = min(minz, min(zs))
return minz
def draw(self, renderer, project=False):
if project:
self.do_3d_projection(renderer)
LineCollection.draw(self, renderer)
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a LineCollection to a Line3DCollection object."""
segments3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Line3DCollection
col.set_segments(segments3d)
class Patch3D(Patch):
'''
3D patch object.
'''
def __init__(self, *args, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_3d_properties(self, verts, zs=0, zdir='z'):
if not iterable(zs):
zs = np.ones(len(verts)) * zs
self._segment3d = [juggle_axes(x, y, z, zdir) \
for ((x, y), z) in zip(verts, zs)]
self._facecolor3d = Patch.get_facecolor(self)
def get_path(self):
return self._path2d
def get_facecolor(self):
return self._facecolor2d
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)))
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def draw(self, renderer):
Patch.draw(self, renderer)
class PathPatch3D(Patch3D):
'''
3D PathPatch object.
'''
def __init__(self, path, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, **kwargs)
self.set_3d_properties(path, zs, zdir)
def set_3d_properties(self, path, zs=0, zdir='z'):
Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
self._code3d = path.codes
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)), self._code3d)
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def get_patch_verts(patch):
"""Return a list of vertices for the path of a patch."""
trans = patch.get_patch_transform()
path = patch.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
else:
return []
def patch_2d_to_3d(patch, z=0, zdir='z'):
"""Convert a Patch to a Patch3D object."""
verts = get_patch_verts(patch)
patch.__class__ = Patch3D
patch.set_3d_properties(verts, z, zdir)
def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
"""Convert a PathPatch to a PathPatch3D object."""
path = pathpatch.get_path()
trans = pathpatch.get_patch_transform()
mpath = trans.transform_path(path)
pathpatch.__class__ = PathPatch3D
pathpatch.set_3d_properties(mpath, z, zdir)
class Patch3DCollection(PatchCollection):
'''
A collection of 3D patches.
'''
def __init__(self, *args, **kwargs):
"""
Create a collection of flat 3D patches with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of patches in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PatchCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PatchCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PatchCollection.set_offsets(self, list(zip(vxs, vys)))
if vzs.size > 0:
return min(vzs)
else:
return np.nan
class Path3DCollection(PathCollection):
'''
A collection of 3D paths.
'''
def __init__(self, *args, **kwargs):
"""
Create a collection of flat 3D paths with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of paths in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PathCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PathCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PathCollection.set_offsets(self, list(zip(vxs, vys)))
if vzs.size > 0:
return min(vzs)
else:
return np.nan
def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):
"""
Convert a :class:`~matplotlib.collections.PatchCollection` into a
:class:`Patch3DCollection` object
(or a :class:`~matplotlib.collections.PathCollection` into a
:class:`Path3DCollection` object).
Keywords:
*zs* The location or locations to place the patches in the
collection along the *zdir* axis. Defaults to 0.
*zdir* The axis in which to place the patches. Default is "z".
*depthshade* Whether to shade the patches to give a sense of depth.
Defaults to *True*.
"""
if isinstance(col, PathCollection):
col.__class__ = Path3DCollection
elif isinstance(col, PatchCollection):
col.__class__ = Patch3DCollection
col._depthshade = depthshade
col.set_3d_properties(zs, zdir)
class Poly3DCollection(PolyCollection):
'''
A collection of 3D polygons.
'''
def __init__(self, verts, *args, **kwargs):
'''
Create a Poly3DCollection.
*verts* should contain 3D coordinates.
Keyword arguments:
zsort, see set_zsort for options.
Note that this class does a bit of magic with the _facecolors
and _edgecolors properties.
'''
zsort = kwargs.pop('zsort', True)
PolyCollection.__init__(self, verts, *args, **kwargs)
self.set_zsort(zsort)
self._codes3d = None
_zsort_functions = {
'average': np.average,
'min': np.min,
'max': np.max,
}
def set_zsort(self, zsort):
'''
Set z-sorting behaviour:
boolean: if True use default 'average'
string: 'average', 'min' or 'max'
'''
if zsort is True:
zsort = 'average'
if zsort is not False:
if zsort in self._zsort_functions:
zsortfunc = self._zsort_functions[zsort]
else:
return False
else:
zsortfunc = None
self._zsort = zsort
self._sort_zpos = None
self._zsortfunc = zsortfunc
self.stale = True
def get_vector(self, segments3d):
"""Optimize points for projection"""
si = 0
ei = 0
segis = []
points = []
for p in segments3d:
points.extend(p)
ei = si+len(p)
segis.append((si, ei))
si = ei
if len(segments3d) > 0:
xs, ys, zs = list(zip(*points))
else:
# We need this so that we can skip the bad unpacking from zip()
xs, ys, zs = [], [], []
ones = np.ones(len(xs))
self._vec = np.array([xs, ys, zs, ones])
self._segis = segis
def set_verts(self, verts, closed=True):
'''Set 3D vertices.'''
self.get_vector(verts)
# 2D verts will be updated at draw time
PolyCollection.set_verts(self, [], closed)
def set_verts_and_codes(self, verts, codes):
'''Sets 3D vertices with path codes'''
# set vertices with closed=False to prevent PolyCollection from
# setting path codes
self.set_verts(verts, closed=False)
# and set our own codes instead.
self._codes3d = codes
def set_3d_properties(self):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
self._sort_zpos = None
self.set_zsort(True)
self._facecolors3d = PolyCollection.get_facecolors(self)
self._edgecolors3d = PolyCollection.get_edgecolors(self)
self._alpha3d = PolyCollection.get_alpha(self)
self.stale = True
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def do_3d_projection(self, renderer):
'''
Perform the 3D projection for this object.
'''
# FIXME: This may no longer be needed?
if self._A is not None:
self.update_scalarmappable()
self._facecolors3d = self._facecolors
txs, tys, tzs = proj3d.proj_transform_vec(self._vec, renderer.M)
xyzlist = [(txs[si:ei], tys[si:ei], tzs[si:ei])
for si, ei in self._segis]
# This extra fuss is to re-order face / edge colors
cface = self._facecolors3d
cedge = self._edgecolors3d
if len(cface) != len(xyzlist):
cface = cface.repeat(len(xyzlist), axis=0)
if len(cedge) != len(xyzlist):
if len(cedge) == 0:
cedge = cface
else:
cedge = cedge.repeat(len(xyzlist), axis=0)
# if required sort by depth (furthest drawn first)
if self._zsort:
indices = range(len(xyzlist))
z_segments_2d = [(self._zsortfunc(zs), list(zip(xs, ys)), fc, ec,
idx) for (xs, ys, zs), fc, ec, idx in
zip(xyzlist, cface, cedge, indices)]
z_segments_2d.sort(key=lambda x: x[0], reverse=True)
else:
raise ValueError("whoops")
segments_2d = [s for z, s, fc, ec, idx in z_segments_2d]
if self._codes3d is not None:
codes = [self._codes3d[idx] for z, s, fc, ec, idx in z_segments_2d]
PolyCollection.set_verts_and_codes(self, segments_2d, codes)
else:
PolyCollection.set_verts(self, segments_2d)
self._facecolors2d = [fc for z, s, fc, ec, idx in z_segments_2d]
if len(self._edgecolors3d) == len(cface):
self._edgecolors2d = [ec for z, s, fc, ec, idx in z_segments_2d]
else:
self._edgecolors2d = self._edgecolors3d
# Return zorder value
if self._sort_zpos is not None:
zvec = np.array([[0], [0], [self._sort_zpos], [1]])
ztrans = proj3d.proj_transform_vec(zvec, renderer.M)
return ztrans[2][0]
elif tzs.size > 0:
# FIXME: Some results still don't look quite right.
# In particular, examine contourf3d_demo2.py
# with az = -54 and elev = -45.
return np.min(tzs)
else:
return np.nan
def set_facecolor(self, colors):
PolyCollection.set_facecolor(self, colors)
self._facecolors3d = PolyCollection.get_facecolor(self)
set_facecolors = set_facecolor
def set_edgecolor(self, colors):
PolyCollection.set_edgecolor(self, colors)
self._edgecolors3d = PolyCollection.get_edgecolor(self)
set_edgecolors = set_edgecolor
def set_alpha(self, alpha):
"""
Set the alpha transparencies of the collection. *alpha* must be
a float or *None*.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = mcolors.to_rgba_array(
self._facecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
self._edgecolors = mcolors.to_rgba_array(
self._edgecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
self.stale = True
def get_facecolors(self):
return self._facecolors2d
get_facecolor = get_facecolors
def get_edgecolors(self):
return self._edgecolors2d
get_edgecolor = get_edgecolors
def draw(self, renderer):
return Collection.draw(self, renderer)
def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a PolyCollection to a Poly3DCollection object."""
segments_3d, codes = paths_to_3d_segments_with_codes(col.get_paths(),
zs, zdir)
col.__class__ = Poly3DCollection
col.set_verts_and_codes(segments_3d, codes)
col.set_3d_properties()
def juggle_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that 2D xs, ys can be plotted in the plane
orthogonal to zdir. zdir is normally x, y or z. However, if zdir
starts with a '-' it is interpreted as a compensation for rotate_axes.
"""
if zdir == 'x':
return zs, xs, ys
elif zdir == 'y':
return xs, zs, ys
elif zdir[0] == '-':
return rotate_axes(xs, ys, zs, zdir)
else:
return xs, ys, zs
def rotate_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that the axes are rotated with zdir along
the original z axis. Prepending the axis with a '-' does the
inverse transform, so zdir can be x, -x, y, -y, z or -z
"""
if zdir == 'x':
return ys, zs, xs
elif zdir == '-x':
return zs, xs, ys
elif zdir == 'y':
return zs, xs, ys
elif zdir == '-y':
return ys, zs, xs
else:
return xs, ys, zs
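# Illustrative reorderings (added comments, not upstream code):
#   juggle_axes([1], [2], [3], 'x') -> ([3], [1], [2])
#   rotate_axes([1], [2], [3], 'x') -> ([2], [3], [1])
# so data laid out in the xy-plane can be placed in the plane orthogonal to
# the requested zdir.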
def iscolor(c):
try:
if len(c) == 4 or len(c) == 3:
if iterable(c[0]):
return False
if hasattr(c[0], '__float__'):
return True
except:
return False
return False
def get_colors(c, num):
"""Stretch the color argument to provide the required number num"""
if type(c) == type("string"):
c = mcolors.to_rgba(c)
if iscolor(c):
return [c] * num
if len(c) == num:
return c
elif iscolor(c):
return [c] * num
elif len(c) == 0: #if edgecolor or facecolor is specified as 'none'
return [[0,0,0,0]] * num
elif iscolor(c[0]):
return [c[0]] * num
else:
raise ValueError('unknown color format %s' % c)
def zalpha(colors, zs):
"""Modify the alphas of the color list according to depth"""
# FIXME: This only works well if the points for *zs* are well-spaced
# in all three dimensions. Otherwise, at certain orientations,
# the min and max zs are very close together.
# Should really normalize against the viewing depth.
colors = get_colors(colors, len(zs))
if zs.size > 0:
norm = Normalize(min(zs), max(zs))
sats = 1 - norm(zs) * 0.7
colors = [(c[0], c[1], c[2], c[3] * s) for c, s in zip(colors, sats)]
return colors
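# Illustrative example (added comment, not upstream code): with depths
# spanning [0, 1], the color at the minimum z keeps its alpha and the one at
# the maximum z has its alpha scaled down to 0.3:
#
#     zalpha([(1, 0, 0, 1.0)], np.array([0.0, 1.0]))
#     # -> [(1, 0, 0, 1.0), (1, 0, 0, 0.3)]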
| apache-2.0 |
iamshang1/Projects | Basic_ML/Image_Classification/traditional_svm.py | 1 | 2023 | import numpy as np
import mahotas as mh
from mahotas.features import surf
import glob
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn import cross_validation
from sklearn.cluster import KMeans
from sklearn.cross_validation import train_test_split
print "loading images"
images = glob.glob('SimpleImageDataset/*.jpg')
features = []
labels = []
alldescriptors = []
def colors(image):
image = image // 64
r,g,b = image.transpose((2,0,1))
pixels = 1 * r + 4 * b + 16 * g
hist = np.bincount(pixels.ravel(), minlength=64)
hist = hist.astype(float)
hist = np.log1p(hist)
return hist
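# (Comment added for clarity, not in the original script.) colors() quantizes
# each RGB channel to 2 bits (// 64 leaves values 0-3) and packs the three
# channels into a single 6-bit index (1*r + 4*b + 16*g), producing a 64-bin
# joint color histogram; np.log1p compresses the counts of dominant bins.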
for i in range(len(images)):
print "processing image %i of %i" % (i+1, len(images))
labels.append(images[i][:-len('00.jpg')])
im = mh.imread(images[i])
imgrey = mh.colors.rgb2gray(im, dtype=np.uint8)
features.append(np.concatenate([mh.features.haralick(im).ravel(), mh.features.lbp(imgrey, 30, 10).ravel(), colors(im)]))
surfim = mh.imread(images[i], as_grey=True)
surfim = surfim.astype(np.uint8)
alldescriptors.append(surf.dense(surfim, spacing=16))
concatenated = np.concatenate(alldescriptors)
print "fitting k mean clusters for surf descriptors"
km = KMeans(15)
km.fit(concatenated)
print "creating surf features"
sfeatures = []
for d in alldescriptors:
c = km.predict(d)
sfeatures.append(np.array([np.sum(c == ci) for ci in range(15)]))
features = np.array(features)
sfeatures = np.array(sfeatures, dtype=float)
features = np.concatenate((features, sfeatures), axis=1)
labels = np.array(labels)
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.1, random_state=42, stratify=labels)
clf = Pipeline([('scaler', StandardScaler()),('classifier', OneVsRestClassifier(SVC()))])
print "building model"
clf.fit(X_train,y_train)
score = clf.score(X_test,y_test)
print 'Accuracy of model: %.2f%%' % (score*100.)
| mit |
krez13/scikit-learn | sklearn/svm/classes.py | 34 | 40599 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to ``coef_``
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
``"ovr"`` trains n_classes one-vs-rest classifiers, while ``"crammer_singer"``
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If ``"crammer_singer"`` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
``[x, self.intercept_scaling]``,
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to ``class_weight[i]*C`` for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
``coef_`` is a readonly property derived from ``raw_coef_`` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation, liblinear, uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
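# Illustrative usage sketch (added comment, not part of the upstream module);
# assumes a tiny toy dataset:
#
#     import numpy as np
#     from sklearn.svm import LinearSVC
#     X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
#     y = np.array([1, 1, 2, 2])
#     clf = LinearSVC(C=1.0, loss='squared_hinge', random_state=0).fit(X, y)
#     clf.predict([[-0.8, -1]])      # expected: array([1]) for this toy data
#     clf.coef_, clf.intercept_      # fitted primal weights and bias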
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive')
Specifies the loss function. 'epsilon_insensitive' is the
epsilon-insensitive loss (standard SVR) while
'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set ``epsilon=0``.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
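# Illustrative usage sketch (added comment, not part of the upstream module);
# assumes synthetic regression data:
#
#     import numpy as np
#     from sklearn.svm import LinearSVR
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 3)
#     y = np.dot(X, [1.0, 2.0, -1.0]) + 0.1 * rng.randn(50)
#     reg = LinearSVR(C=1.0, epsilon=0.1, random_state=0).fit(X, y)
#     reg.predict(X[:2])             # approximately recovers the linear trend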
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
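    Examples
    --------
    A minimal usage sketch (illustrative only): ``predict`` returns +1 for
    inliers and -1 for outliers. Outputs are not asserted here because the
    exact fitted values depend on the training data and parameters.

    >>> from sklearn.svm import OneClassSVM
    >>> import numpy as np
    >>> X = np.array([[0., 0.], [0.1, 0.1], [-0.1, 0.2], [10., 10.]])
    >>> clf = OneClassSVM(nu=0.25, gamma=0.5)
    >>> clf.fit(X)  # doctest: +SKIP
    >>> clf.predict(X)  # doctest: +SKIP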
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
        dec : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
waterponey/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 6 | 40529 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
    # Test whether staged predictions eventually give
    # the same prediction as the fully-fitted model.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
    # Test that oob_improvement_ has the correct shape; also a hard-coded regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
    # Test that accessing oob_improvement_ raises when there are no OOB estimates (subsample=1.0).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
    # one line per iteration for iterations 1-10, then one every 10 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
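# A hedged, illustrative alternative (not exercised by the tests below, and the
# name is hypothetical): a monitor that requests early stopping once the
# out-of-bag improvement of the current stage turns negative. It assumes
# ``subsample < 1.0`` so that ``oob_improvement_`` is tracked, and that the
# monitor is called after the OOB statistics for stage ``i`` have been updated.
def negative_oob_improvement_monitor(i, est, locals):
    """Illustrative only: stop as soon as the OOB improvement turns negative."""
    oob = getattr(est, 'oob_improvement_', None)
    return oob is not None and oob[i] < 0.0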
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
assert_array_almost_equal(sparse.predict(X_sparse), dense.predict(X))
assert_array_almost_equal(dense.predict(X_sparse), sparse.predict(X))
    # EstimatorClass is a class object, so use issubclass rather than isinstance
    if issubclass(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
assert_array_almost_equal(sparse.decision_function(X_sparse),
sparse.decision_function(X))
assert_array_almost_equal(dense.decision_function(X_sparse),
sparse.decision_function(X))
assert_array_almost_equal(
np.array(sparse.staged_decision_function(X_sparse)),
np.array(sparse.staged_decision_function(X)))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| bsd-3-clause |
ishank08/scikit-learn | sklearn/cluster/tests/test_k_means.py | 26 | 32656 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics.cluster import homogeneity_score
# non centered, sparse centers to check the handling of dense and sparse inputs
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_elkan_results():
rnd = np.random.RandomState(0)
X_normal = rnd.normal(size=(50, 10))
X_blobs, _ = make_blobs(random_state=0)
km_full = KMeans(algorithm='full', n_clusters=5, random_state=0, n_init=1)
km_elkan = KMeans(algorithm='elkan', n_clusters=5,
random_state=0, n_init=1)
for X in [X_normal, X_blobs]:
km_full.fit(X)
km_elkan.fit(X)
assert_array_almost_equal(km_elkan.cluster_centers_,
km_full.cluster_centers_)
assert_array_equal(km_elkan.labels_, km_full.labels_)
def test_labels_assignment_and_inertia():
    # pure numpy implementation used as an easily auditable reference
    # ("gold") implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
    # check that the number of cluster centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
    # check that an error is raised if the precompute_distances flag has an
    # unsupported value
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_explicit_init_shape():
# test for sensible errors when giving explicit init
# with wrong number of features or clusters
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 3))
for Class in [KMeans, MiniBatchKMeans]:
# mismatch of number of features
km = Class(n_init=1, init=X[:, :2], n_clusters=len(X))
msg = "does not match the number of features of the data"
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:, :2],
n_clusters=len(X))
assert_raises_regex(ValueError, msg, km.fit, X)
# mismatch of number of clusters
msg = "does not match the number of clusters"
km = Class(n_init=1, init=X[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:2, :],
n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
def test_k_means_fortran_aligned_data():
    # Check that KMeans works correctly, even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42, n_clusters=2)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
msg = "does not match the number of clusters"
assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init,
random_state=42).fit,
X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
    # Starting with bad centers that are quickly ignored should not
    # result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers, which in turn would make the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1,
algorithm='elkan')
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1,
algorithm='elkan')
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
    # predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
    # predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_int_input():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
for dtype in [np.int32, np.int64]:
X_int = np.array(X_list, dtype=dtype)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
for km in fitted_models:
assert_equal(km.cluster_centers_.dtype, np.float64)
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_predict_equal_labels():
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='full')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='elkan')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
def test_full_vs_elkan():
km1 = KMeans(algorithm='full', random_state=13)
km2 = KMeans(algorithm='elkan', random_state=13)
km1.fit(X)
km2.fit(X)
    assert_equal(homogeneity_score(km1.predict(X), km2.predict(X)), 1.0)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
def test_max_iter_error():
km = KMeans(max_iter=-1)
assert_raise_message(ValueError, 'Number of iterations should be',
km.fit, X)
def test_float_precision():
km = KMeans(n_init=1, random_state=30)
mb_km = MiniBatchKMeans(n_init=1, random_state=30)
inertia = {}
X_new = {}
centers = {}
for estimator in [km, mb_km]:
for is_sparse in [False, True]:
for dtype in [np.float64, np.float32]:
if is_sparse:
X_test = sp.csr_matrix(X_csr, dtype=dtype)
else:
X_test = X.astype(dtype)
estimator.fit(X_test)
# dtype of cluster centers has to be the dtype of the input
# data
assert_equal(estimator.cluster_centers_.dtype, dtype)
inertia[dtype] = estimator.inertia_
X_new[dtype] = estimator.transform(X_test)
centers[dtype] = estimator.cluster_centers_
# ensure the extracted row is a 2d array
assert_equal(estimator.predict(X_test[:1]),
estimator.labels_[0])
if hasattr(estimator, 'partial_fit'):
estimator.partial_fit(X_test[0:3])
# dtype of cluster centers has to stay the same after
# partial_fit
assert_equal(estimator.cluster_centers_.dtype, dtype)
# compare arrays with low precision since the difference between
# 32 and 64 bit sometimes makes a difference up to the 4th decimal
# place
assert_array_almost_equal(inertia[np.float32], inertia[np.float64],
decimal=4)
assert_array_almost_equal(X_new[np.float32], X_new[np.float64],
decimal=4)
assert_array_almost_equal(centers[np.float32], centers[np.float64],
decimal=4)
def test_k_means_init_centers():
# This test is used to check KMeans won't mutate the user provided input
# array silently even if input data and init centers have the same type
X_small = np.array([[1.1, 1.1], [-7.5, -7.5], [-1.1, -1.1], [7.5, 7.5]])
init_centers = np.array([[0.0, 0.0], [5.0, 5.0], [-5.0, -5.0]])
for dtype in [np.int32, np.int64, np.float32, np.float64]:
X_test = dtype(X_small)
init_centers_test = dtype(init_centers)
assert_array_equal(init_centers, init_centers_test)
km = KMeans(init=init_centers_test, n_clusters=3, n_init=1)
km.fit(X_test)
assert_equal(False, np.may_share_memory(km.cluster_centers_, init_centers))
def test_sparse_k_means_init_centers():
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
# Get a local optimum
centers = KMeans(n_clusters=3).fit(X).cluster_centers_
# Fit starting from a local optimum shouldn't change the solution
np.testing.assert_allclose(
centers,
KMeans(n_clusters=3,
init=centers,
n_init=1).fit(X).cluster_centers_
)
# The same should be true when X is sparse
X_sparse = sp.csr_matrix(X)
np.testing.assert_allclose(
centers,
KMeans(n_clusters=3,
init=centers,
n_init=1).fit(X_sparse).cluster_centers_
)
def test_sparse_validate_centers():
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
# Get a local optimum
centers = KMeans(n_clusters=4).fit(X).cluster_centers_
# Test that a ValueError is raised for validate_center_shape
classifier = KMeans(n_clusters=3, init=centers, n_init=1)
msg = "The shape of the initial centers \(\(4L?, 4L?\)\) " \
"does not match the number of clusters 3"
assert_raises_regex(ValueError, msg, classifier.fit, X)
| bsd-3-clause |
sppalkia/weld | weld-python/tests/grizzly/core/test_series.py | 2 | 4543 | """
Test basic Series functionality.
"""
import numpy as np
import pandas as pd
import pytest
import weld.grizzly as gr
types_ = ['int8', 'uint8', 'int16', 'uint16', 'int32',\
'uint32', 'int64', 'uint64', 'float32', 'float64']
def _test_binop(grizzly_op, pandas_op, name):
"""
Test binary operators, ensuring that their output/data type
matches Pandas.
"""
types = ['int8', 'uint8', 'int16', 'uint16', 'int32',\
'uint32', 'int64', 'uint64', 'float32', 'float64']
for left in types:
for right in types:
a = gr.GrizzlySeries([1, 2, 3], dtype=left)
b = gr.GrizzlySeries([1, 2, 3], dtype=right)
result = grizzly_op(a, b).to_pandas()
a = pd.Series([1, 2, 3], dtype=left)
b = pd.Series([1, 2, 3], dtype=right)
expect = pandas_op(a, b)
assert result.equals(expect), "{}, {} (op={})".format(left, right, name)
def test_add():
# Exhaustive type-to-type test.
_test_binop(gr.GrizzlySeries.add, pd.Series.add, "add")
def test_div():
# Exhaustive type-to-type test.
_test_binop(gr.GrizzlySeries.truediv, pd.Series.truediv, "truediv")
_test_binop(gr.GrizzlySeries.div, pd.Series.div, "div")
def _compare_vs_pandas(func):
"""
Helper to compare Pandas and Grizzly output. `func`
is a generator that can yield one or more values to test
for equality.
"""
for (expect, got) in zip(func(pd.Series), func(gr.GrizzlySeries)):
        # Make sure we actually used Grizzly for the whole computation.
assert isinstance(got, gr.GrizzlySeries)
got = got.to_pandas()
assert got.equals(expect)
def test_arithmetic_expression():
def eval_expression(cls):
a = cls([1, 2, 3], dtype='int32')
b = cls([4, 5, 6], dtype='int32')
c = a + b * b - a
d = (c + a) * (c + b)
e = (d / a) - (d / b)
yield a + b + c * d - e
_compare_vs_pandas(eval_expression)
def test_compare_ops():
def eval_expression(cls):
a = cls([1, np.nan, 3, 4, 6])
b = cls([1, np.nan, 2, 5, np.nan])
yield a == b
yield a > b
yield a >= b
yield a <= b
yield a < b
_compare_vs_pandas(eval_expression)
def test_float_nan():
def eval_expression(cls):
a = cls([1, 2, np.nan])
b = cls([np.nan, 5, 6])
c = a + b * b - a
d = (c + a) * (c + b)
e = (d / a) - (d / b)
yield a + b + c * d - e
_compare_vs_pandas(eval_expression)
def test_scalar():
types = ['int8', 'uint8', 'int16', 'uint16', 'int32',\
'uint32', 'int64', 'uint64', 'float32', 'float64']
for left in types:
for right in types:
a = gr.GrizzlySeries([1, 2, 3], dtype=left)
b = 123
result = (a + b).to_pandas()
a = pd.Series([1, 2, 3], dtype=left)
expect = a + b
assert result.equals(expect), "{}, {} (op={})".format(left, right, "scalar")
def test_indexing():
# We don't compare with Pandas in these tests because the output
# doesn't always match (this is because we don't currently support indexes).
x = gr.GrizzlySeries(list(range(100)), dtype='int64')
assert x[0] == 0
assert x[50] == 50
assert np.array_equal(x[10:50].evaluate().values, np.arange(10, 50, dtype='int64'))
assert np.array_equal(x[:50].evaluate().values, np.arange(50, dtype='int64'))
assert np.array_equal(x[x > 50].evaluate().values, np.arange(51, 100, dtype='int64'))
assert np.array_equal(x[x == 2].evaluate().values, np.array([2], dtype='int64'))
assert np.array_equal(x[x < 0].evaluate().values, np.array([], dtype='int64'))
def test_name():
# Test that names propagate after operations.
x = gr.GrizzlySeries([1,2,3], name="testname")
y = x + x
assert y.evaluate().name == "testname"
y = x.agg(['sum', 'count'])
assert y.evaluate().name == "testname"
y = x[:2]
assert y.evaluate().name == "testname"
y = x[x == 1]
assert y.evaluate().name == "testname"
def test_unsupported_binop_error():
    # Test that unsupported operand combinations raise useful errors
from weld.grizzly.core.error import GrizzlyError
with pytest.raises(GrizzlyError):
a = gr.GrizzlySeries([1,2,3])
b = pd.Series([1,2,3])
a.add(b)
with pytest.raises(TypeError):
a = gr.GrizzlySeries(["hello", "world"])
b = gr.GrizzlySeries(["hello", "world"])
a.divide(b)
| bsd-3-clause |
dmargala/qusp | examples/linear_continuum.py | 1 | 9814 | #!/usr/bin/env python
import argparse
import numpy as np
import numpy.ma as ma
import h5py
import qusp
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import scipy.interpolate
import scipy.sparse
from sklearn import linear_model
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
## targets to fit
parser.add_argument("--name", type=str, default=None,
help="skim file name")
parser.add_argument("--abs-beta", type=float, default=3.92,
help='absorption redshift scaling power')
parser.add_argument("--abs-alpha", type=float, default=0.0018,
help='absorption coefficient')
parser.add_argument("--forest-wave-ref", type=float, default=1185.0,
help='forest wave reference')
args = parser.parse_args()
# import data
forest_skim = h5py.File(args.name+'-forest.hdf5', 'r')
forest_flux = np.ma.MaskedArray(forest_skim['flux'][:], mask=forest_skim['mask'][:])
forest_ivar = np.ma.MaskedArray(forest_skim['ivar'][:], mask=forest_skim['mask'][:])
forest_loglam = forest_skim['loglam'][:]
forest_wave = np.power(10.0, forest_loglam)
forest_norm = forest_skim['norm'][:]
quasar_redshifts = forest_skim['z'][:]
redshift_order = np.argsort(quasar_redshifts)
wave_lya = forest_skim.attrs['wave_lya']
forest_pixel_redshifts = (1.0 + quasar_redshifts[:,np.newaxis])*forest_wave/wave_lya - 1.0
print 'Input data shape: ', forest_pixel_redshifts.shape
#### Method 1, find which mean flux slice to use for which pixel
## uses: shifted_rows, shifted_cols, flux.shape, forest_flux, forest_weight, args.subsample_step
## redshift_order, forest_pixel_redshifts
print 'Starting linear continuum fit ...'
num_forests, num_forest_waves = forest_flux.shape
print 'Building model matrix...'
log_forest_wave_ratio = np.log(forest_wave/args.forest_wave_ref)
# raveled_weights = np.ones_like(forest_ivar).ravel()#np.sqrt(forest_ivar/(1.0+forest_ivar*0.055)).ravel()
num_params = 2
param_coefs = np.tile(np.vstack((np.ones(num_forest_waves), log_forest_wave_ratio)).reshape((-1,), order='F'), num_forests)
param_rows = np.repeat(np.arange(num_forests*num_forest_waves), num_params)
param_cols = np.vstack((np.repeat(np.arange(num_forests)*num_params, num_forest_waves),
np.repeat(np.arange(num_forests)*num_params + 1, num_forest_waves))).reshape((-1,), order='F')
# num_params = 1
# param_coefs = np.tile(np.ones(num_forest_waves), num_forests)
# param_rows = np.arange(num_forests*num_forest_waves)
# param_cols = np.repeat(np.arange(num_forests), num_forest_waves)
print 'Param coef shapes: ', param_coefs.shape, param_rows.shape, param_cols.shape
#### Add continuum coefficients
cont_coefs = np.tile(np.ones(num_forest_waves), num_forests)
cont_rows = np.arange(num_forests*num_forest_waves)
cont_cols = np.tile(np.arange(num_forest_waves), num_forests)
print 'Continuum coef shapes: ', cont_coefs.shape, cont_rows.shape, cont_cols.shape
#### Add absorption coefficients
abs_coefs = args.abs_alpha*np.power(1+forest_pixel_redshifts, args.abs_beta)
# forest_min_z = 1.9
# forest_max_z = 3.5
# forest_dz = 0.1
# num_z_bins = int((forest_max_z-forest_min_z)/forest_dz)
# fz_zbin_indices = np.floor((forest_pixel_redshifts.ravel() - forest_min_z)/forest_dz).astype(int)
#
# print fz_zbin_indices.shape
# print fz_zbin_indices
# lo_coef = forest_pixel_redshifts - fz_zbin_indices*dz
# hi_coef = forest_dz-lo_coef
# abs_coefs = np.vstack((lo_coef,hi_coef)).reshape((-1,),order='F')
# abs_cols = fz_zbin_indices
# abs_rows = np.repeat(np.arange(num_forests*num_forest_waves), 2)
# abs_coefs = np.ones(num_forest_waves*num_forests)
# abs_rows = np.arange(num_forests*num_forest_waves)
# abs_cols = fz_zbin_indices
# print abs_coefs.shape
model_coefs = np.concatenate((cont_coefs, param_coefs))
model_rows = np.concatenate((cont_rows, param_rows))
model_cols = np.concatenate((cont_cols, num_forest_waves+param_cols))
print 'Model coef shapes: ', model_coefs.shape, model_rows.shape, model_cols.shape
model_matrix = scipy.sparse.csc_matrix((model_coefs, (model_rows, model_cols)), shape=(num_forests*num_forest_waves,num_forest_waves+num_params*num_forests))
print 'Model matrix shape: ', model_matrix.shape
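    # Illustrative note (toy numbers assumed, nothing below uses them): with 2
    # forests and 3 rest-frame pixels the stacked system above has 2*3 rows and
    # 3 + 2*2 columns -- the first 3 columns hold the shared log-continuum per
    # pixel and each forest adds an (a_q, b_q) pair multiplying 1 and
    # log(lambda/lambda_ref), so row (q, i) encodes
    #   log F_qi + alpha*(1+z_qi)^beta = log C_i + log a_q + b_q*log(lambda_i/lambda_ref)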
model_y = ma.log(ma.masked_where(forest_flux <= 0, forest_flux)) + abs_coefs
print 'y shape, num masked pixels: ', model_y.shape, np.sum(model_y.mask)
# valid = ~model_y.mask.ravel()
regr = linear_model.LinearRegression(fit_intercept=False)
print ('... performing fit using %s ...\n' % regr)
# regr.fit(model_matrix[valid], model_y.ravel()[valid])
regr.fit(model_matrix, model_y.ravel())
soln = regr.coef_
continuum = np.exp(soln[:num_forest_waves])
# absorption = soln[num_forest_waves:2*num_forest_waves]
params_a = np.exp(soln[num_forest_waves:num_forest_waves+num_params*num_forests:num_params])
params_b = soln[num_forest_waves+1:num_forest_waves+num_params*num_forests:num_params]
# mean_transmission = np.exp(soln[num_forest_waves+num_params*num_forests:])
print 'Number of continuum params: ', continuum.shape
outfile = h5py.File(args.name+'-linear-continuum.hdf5', 'w')
# copy attributes from input file
for attr_key in forest_skim.attrs:
outfile.attrs[attr_key] = forest_skim.attrs[attr_key]
# save args
outfile.attrs['abs_alpha'] = args.abs_alpha
outfile.attrs['abs_beta'] = args.abs_beta
outfile.attrs['forest_wave_ref'] = args.forest_wave_ref
# save fit results
outfile.create_dataset('params_a', data=params_a, compression="gzip")
outfile.create_dataset('params_b', data=params_b, compression="gzip")
outfile.create_dataset('continuum', data=continuum, compression="gzip")
outfile.create_dataset('continuum_wave', data=forest_wave, compression="gzip")
outfile.close()
# plt.figure(figsize=(12,9))
# plt.plot(np.linspace(forest_min_z, forest_max_z, num_z_bins), mean_transmission, c='k')
# plt.ylabel(r'z')
# plt.xlabel(r'Mean F(z)')
# plt.grid()
# plt.savefig(args.name+'-linear-mean-transmission.png', dpi=100, bbox_inches='tight')
# plt.close()
plt.figure(figsize=(12,9))
plt.step(forest_wave, continuum, c='k', where='mid')
def draw_example(i, **kwargs):
print quasar_redshifts[i]
plt.scatter(forest_wave, forest_norm[i]*forest_flux[i], marker='+', **kwargs)
plt.plot(forest_wave, forest_norm[i]*params_a[i]*np.exp(params_b[i]*log_forest_wave_ratio)*continuum, **kwargs)
# draw_example(1, color='blue')
# draw_example(10, color='green')
# draw_example(100, color='red')
plt.xlim(forest_wave[0], forest_wave[-1])
plt.ylabel(r'Continuum (arb. units)')
plt.xlabel(r'Rest Wavelength ($\AA$)')
plt.grid()
plt.savefig(args.name+'-linear-continuum.png', dpi=100, bbox_inches='tight')
plt.close()
plt.figure(figsize=(12,9))
plt.hist(params_a, bins=np.linspace(-0, 3, 51), histtype='stepfilled', alpha=0.5)
plt.xlabel('a')
plt.grid()
plt.savefig(args.name+'-linear-param-a-dist.png', dpi=100, bbox_inches='tight')
plt.close()
plt.figure(figsize=(12,9))
plt.hist(params_b, bins=np.linspace(-20, 20, 51), histtype='stepfilled', alpha=0.5)
plt.xlabel('b')
plt.grid()
plt.savefig(args.name+'-linear-param-b-dist.png', dpi=100, bbox_inches='tight')
plt.close()
plt.figure(figsize=(12,9))
plt.scatter(params_a, params_b, marker='+')
plt.xlabel('a')
plt.ylabel('b')
plt.ylim(-20,20)
plt.xlim(0,3)
plt.grid()
plt.savefig(args.name+'-linear-param-scatter.png', dpi=100, bbox_inches='tight')
plt.close()
# rest and obs refer to pixel grid
print 'Estimating deltas in forest frame...'
model_flux = params_a[:,np.newaxis]*np.power(forest_wave/args.forest_wave_ref, params_b[:,np.newaxis])*continuum*np.exp(-abs_coefs)
delta_flux_rest = forest_flux/model_flux - 1.0
delta_ivar_rest = forest_ivar*(model_flux*model_flux)
print 'Shifting deltas to observed frame...'
shifted_rows = forest_skim['shifted_rows'][:]
shifted_cols = forest_skim['shifted_cols'][:]
shifted_loglam = forest_skim['shifted_loglam'][:]
delta_flux_obs = ma.empty((num_forests, len(shifted_loglam)))
delta_ivar_obs = ma.empty_like(delta_flux_obs)
delta_flux_obs[shifted_rows, shifted_cols] = delta_flux_rest
delta_ivar_obs[shifted_rows, shifted_cols] = delta_ivar_rest
print 'Plotting mean delta...'
mask_params = (params_a > .01) & (params_a < 100) & (params_b > -20) & (params_b < 20)
print 'Number with okay params: %d' % np.sum(mask_params)
delta_flux_mean = ma.average(delta_flux_obs[mask_params], axis=0, weights=delta_ivar_obs[mask_params])
plt.figure(figsize=(12,9))
plt.plot(np.power(10.0, shifted_loglam), delta_flux_mean)
# plt.ylim(0.06*np.array([-1,1]))
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.ylabel(r'Delta Mean')
plt.grid()
plt.savefig(args.name+'-linear-delta-mean.png', dpi=100, bbox_inches='tight')
plt.close()
delta_flux_var = ma.average((delta_flux_obs[mask_params] - delta_flux_mean)**2, axis=0, weights=delta_ivar_obs[mask_params])
plt.figure(figsize=(12,9))
plt.plot(np.power(10.0, shifted_loglam), delta_flux_var)
plt.ylim(0,0.5)
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.ylabel(r'Delta Variance')
plt.grid()
plt.savefig(args.name+'-linear-delta-var.png', dpi=100, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
| mit |
benbo/QPR_CP1 | learn_classifier.py | 1 | 6829 | import json
import string
from options import space
from hyperopt import fmin, tpe, Trials, space_eval
from sklearn import svm
from sklearn.linear_model import LogisticRegression as lr
from sklearn.metrics import f1_score
from hyperopt import STATUS_OK
from sklearn.cross_validation import StratifiedKFold
from sklearn.cross_validation import cross_val_score
from sklearn.feature_extraction.text import CountVectorizer
from load_data import load_files
from string import maketrans
class Featurizer(object):
def __init__(self,args=None):
self.COUNT_BASE = {
'strip_accents': None,
'stop_words': 'english',
'ngram_range': (1, 1),
'analyzer': 'word',
'max_df': 1.0,
'min_df': 1,
'max_features': None,
'binary': False
}
#for unicode:
#self.remove_punct_map = dict((ord(char), u' ') for char in string.punctuation)
#self.remove_digit_map = dict((ord(char), u' ') for char in string.digits)
#for string:
self.remove_punct_map = maketrans(string.punctuation,' '*len(string.punctuation))
self.remove_digit_map = maketrans(string.digits,' '*len(string.digits))
self.printable = frozenset(string.printable)
self.strip_digits=False
self.strip_punct=False
self.ascii_only = False
self.vec = None
if not args is None:
feat_options = args['features']
for key,value in self.COUNT_BASE.items():
if key not in feat_options:
feat_options[key] = value
self.options = feat_options
self.strip_digits=args['cleaning']['strip_digits']
self.strip_punct=args['cleaning']['strip_punct']
else:
self.options = self.COUNT_BASE
def strip_non_ascii(self, text):
return filter(lambda x: x in self.printable, text)
@staticmethod
def clean_text(text, rm_map):
"""
:param text: String
:param rm_map: Dictionary of characters to replace [ordinal of character to replace, unicode character to replace with]
:return: Cleaned string
"""
return text.translate(rm_map)
def get_features(self,data, options):
#if this operation is very expensive then we should store the results
self.vec = CountVectorizer(strip_accents=options['strip_accents'], stop_words=options['stop_words'],
ngram_range=options['ngram_range'], analyzer=options['analyzer'],
max_df=options['max_df'], min_df=options['min_df'],
max_features=options['max_features'], binary = options['binary'])
return self.vec.fit_transform(data)
def set_options(self,args):
feat_options = args['features']
for key,value in self.COUNT_BASE.items():
if key not in feat_options:
feat_options[key] = value
self.options = feat_options
self.strip_digits=args['cleaning']['strip_digits']
self.strip_punct=args['cleaning']['strip_punct']
def set_ascii(self, value):
if value:
self.ascii_only = True
def run(self,text):
#walk through text cleaning options
if self.ascii_only:
text = [self.strip_non_ascii(t) for t in text]
if self.strip_digits:
text = [self.clean_text(t, self.remove_digit_map) for t in text]
if self.strip_punct:
text = [self.clean_text(t, self.remove_punct_map) for t in text]
return self.get_features(text,self.options)
def run_testdata(self,text):
if self.ascii_only:
text = [self.strip_non_ascii(t) for t in text]
if self.strip_digits:
text = [self.clean_text(t, self.remove_digit_map) for t in text]
if self.strip_punct:
text = [self.clean_text(t, self.remove_punct_map) for t in text]
return self.vec.transform(text)
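# Illustrative usage sketch (hypothetical strings; nothing in this module calls it):
# run() fits the vectorizer on training text, run_testdata() reuses the fitted
# vocabulary for held-out text.
def _featurizer_usage_sketch():
    fz = Featurizer()
    train_matrix = fz.run(["first ad text 123", "second ad text"])
    test_matrix = fz.run_testdata(["unseen ad text"])
    return train_matrix, test_matrix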
class ClassifierLearner(object):
def __init__(self, labels, text, num_folds=5,folds=None,cjobs=2,cvjobs=10):
#process and prepare text
self.cjobs=cjobs#n_jobs parameter for classifier
self.cvjobs = cvjobs #n_jobs parameter for cross validation
self.Featurizer = Featurizer()
        ascii_only_text = [self.Featurizer.strip_non_ascii(t) for t in text]
        self.text = {'raw': text, 'ascii': ascii_only_text}
#store labels
self.labels = labels
#set up CV
if not folds is None:
self.skf = folds
else:
self.skf = StratifiedKFold(labels, num_folds, random_state=137)
def call_experiment(self, args):
"""
:param args: Hyperopt parameters
:return: Hyperopt feedback
"""
#set up classifier
model = self.get_model(args)
#get raw or non-ascii text (removing non-ascii is expensive)
text = self.text[args['text']['text']]#get raw text or ascii only text
self.Featurizer.set_options(args)
X = self.Featurizer.run(text)
f1 = cross_val_score(model, X, self.labels, cv=self.skf, scoring='f1', n_jobs=self.cvjobs).mean()
print f1
return {'loss': -f1, 'status': STATUS_OK}
def get_model(self,args):
if args['model']['model'] == 'LR':
model = lr(penalty=args['model']['regularizer_lr'], C=args['model']['C_lr'],n_jobs=self.cjobs)
elif args['model']['model'] == 'SVM':
            if args['model']['regularizer_svm'] == 'l1':
                # squared hinge loss is not available when penalty is l1;
                # note that LinearSVC has no n_jobs parameter, so it is not passed here
                model = svm.LinearSVC(C=args['model']['C_svm'], penalty=args['model']['regularizer_svm'], dual=False)  # loss='hinge'
            else:
                model = svm.LinearSVC(C=args['model']['C_svm'], penalty=args['model']['regularizer_svm'])
return model
def run(self,max_evals=100):
trials = Trials()
best = fmin(self.call_experiment,
space=space,
algo=tpe.suggest,
max_evals=max_evals,
trials=trials)
#print best
args = space_eval(space, best)
#print "losses:", [-l for l in trials.losses()]
#print max([-l for l in trials.losses()])
#TODO
#return model and featurizer. Don't forget to set ascii option.
featurizer = Featurizer(args)
if args['text']['text'] == 'ascii':
featurizer.set_ascii(True)
return self.get_model(args),featurizer
if __name__ == '__main__':
text, labels, ad_id, phone = load_files()
CL = ClassifierLearner(labels, text, num_folds=5)
mode, featurizer = CL.run(max_evals=5)
| apache-2.0 |
amolkahat/pandas | asv_bench/benchmarks/reshape.py | 2 | 4149 | import string
from itertools import product
import numpy as np
from pandas import DataFrame, MultiIndex, date_range, melt, wide_to_long
import pandas as pd
class Melt(object):
def setup(self):
self.df = DataFrame(np.random.randn(10000, 3), columns=['A', 'B', 'C'])
self.df['id1'] = np.random.randint(0, 10, 10000)
self.df['id2'] = np.random.randint(100, 1000, 10000)
def time_melt_dataframe(self):
melt(self.df, id_vars=['id1', 'id2'])
class Pivot(object):
def setup(self):
N = 10000
index = date_range('1/1/2000', periods=N, freq='h')
data = {'value': np.random.randn(N * 50),
'variable': np.arange(50).repeat(N),
'date': np.tile(index.values, 50)}
self.df = DataFrame(data)
def time_reshape_pivot_time_series(self):
self.df.pivot('date', 'variable', 'value')
class SimpleReshape(object):
def setup(self):
arrays = [np.arange(100).repeat(100),
np.roll(np.tile(np.arange(100), 100), 25)]
index = MultiIndex.from_arrays(arrays)
self.df = DataFrame(np.random.randn(10000, 4), index=index)
self.udf = self.df.unstack(1)
def time_stack(self):
self.udf.stack()
def time_unstack(self):
self.df.unstack(1)
class Unstack(object):
def setup(self):
m = 100
n = 1000
levels = np.arange(m)
index = MultiIndex.from_product([levels] * 2)
columns = np.arange(n)
values = np.arange(m * m * n).reshape(m * m, n)
self.df = DataFrame(values, index, columns)
self.df2 = self.df.iloc[:-1]
def time_full_product(self):
self.df.unstack()
def time_without_last_row(self):
self.df2.unstack()
class SparseIndex(object):
def setup(self):
NUM_ROWS = 1000
self.df = DataFrame({'A': np.random.randint(50, size=NUM_ROWS),
'B': np.random.randint(50, size=NUM_ROWS),
'C': np.random.randint(-10, 10, size=NUM_ROWS),
'D': np.random.randint(-10, 10, size=NUM_ROWS),
'E': np.random.randint(10, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
self.df = self.df.set_index(['A', 'B', 'C', 'D', 'E'])
def time_unstack(self):
self.df.unstack()
class WideToLong(object):
def setup(self):
nyrs = 20
nidvars = 20
N = 5000
self.letters = list('ABCD')
yrvars = [l + str(num)
for l, num in product(self.letters, range(1, nyrs + 1))]
columns = [str(i) for i in range(nidvars)] + yrvars
self.df = DataFrame(np.random.randn(N, nidvars + len(yrvars)),
columns=columns)
self.df['id'] = self.df.index
def time_wide_to_long_big(self):
wide_to_long(self.df, self.letters, i='id', j='year')
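# Illustrative toy example (not part of the benchmark): wide_to_long reshapes
# stub-named columns such as A1/A2 into long format keyed by (i, j), e.g.
# wide_to_long(DataFrame({'id': [0, 1], 'A1': [1, 2], 'A2': [3, 4]}),
#              ['A'], i='id', j='year')
# yields rows indexed by (id, year) with a single 'A' column.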
class PivotTable(object):
def setup(self):
N = 100000
fac1 = np.array(['A', 'B', 'C'], dtype='O')
fac2 = np.array(['one', 'two'], dtype='O')
ind1 = np.random.randint(0, 3, size=N)
ind2 = np.random.randint(0, 2, size=N)
self.df = DataFrame({'key1': fac1.take(ind1),
'key2': fac2.take(ind2),
'key3': fac2.take(ind2),
'value1': np.random.randn(N),
'value2': np.random.randn(N),
'value3': np.random.randn(N)})
def time_pivot_table(self):
self.df.pivot_table(index='key1', columns=['key2', 'key3'])
class GetDummies(object):
def setup(self):
categories = list(string.ascii_letters[:12])
s = pd.Series(np.random.choice(categories, size=1000000),
dtype=pd.api.types.CategoricalDtype(categories))
self.s = s
def time_get_dummies_1d(self):
pd.get_dummies(self.s, sparse=False)
def time_get_dummies_1d_sparse(self):
pd.get_dummies(self.s, sparse=True)
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
untom/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
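# Illustrative usage sketch (assumed data X_demo; not a test): outside the test
# harness a typical embedding call is simply
# manifold.LocallyLinearEmbedding(n_neighbors=10, n_components=2).fit_transform(X_demo)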
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
702nADOS/sumo | tools/net/visum_mapDistricts.py | 1 | 16790 | #!/usr/bin/env python
"""
@file visum_mapDistricts.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2007-10-25
@version $Id: visum_mapDistricts.py 22608 2017-01-17 06:28:54Z behrisch $
This script reads two networks and a set of matching nodes, reprojects
one network onto the other and maps the districts of the first network
onto the second, writing district, node, edge and connection files.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2008-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import string
import sys
import math
from optparse import OptionParser
from matplotlib.collections import LineCollection
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib.net
import netshiftadaptor
def computeDistance(n1, n2):
xd = n1._coord[0] - n2._coord[0]
yd = n1._coord[1] - n2._coord[1]
return math.sqrt(xd * xd + yd * yd)
def relAngle(angle1, angle2):
angle2 -= angle1
if angle2 > 180:
angle2 = (360. - angle2) * -1.
while angle2 < -180:
angle2 = 360 + angle2
return angle2
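# Illustrative examples (not in the original script): relAngle returns the signed
# smallest rotation in degrees from angle1 to angle2, e.g.
#   relAngle(350., 10.) -> 20.0 and relAngle(10., 350.) -> -20.0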
# initialise
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="tell me what you are doing")
# i/o
optParser.add_option("-1", "--net1", dest="net1",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-2", "--net2", dest="net2",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-a", "--nodes1", dest="nodes1",
help="The first matching nodes", metavar="NODELIST")
optParser.add_option("-b", "--nodes2", dest="nodes2",
help="The second matching nodes", metavar="NODELIST")
# parse options
(options, args) = optParser.parse_args()
# read networks
if options.verbose:
print("Reading net#1...")
net1 = sumolib.net.readNet(options.net1)
if options.verbose:
print("Reading net#2...")
net2 = sumolib.net.readNet(options.net2)
# reproject the visum net onto the navteq net
adaptor = netshiftadaptor.NetShiftAdaptor(
net1, net2, options.nodes1.split(","), options.nodes2.split(","))
adaptor.reproject(options.verbose)
# build a speed-up grid
xmin = 100000
xmax = -100000
ymin = 100000
ymax = -100000
for n in net1._nodes:
xmin = min(xmin, n._coord[0])
xmax = max(xmax, n._coord[0])
ymin = min(ymin, n._coord[1])
ymax = max(ymax, n._coord[1])
for n in net2._nodes:
xmin = min(xmin, n._coord[0])
xmax = max(xmax, n._coord[0])
ymin = min(ymin, n._coord[1])
ymax = max(ymax, n._coord[1])
xmin = xmin - .1
xmax = xmax + .1
ymin = ymin - .1
ymax = ymax + .1
CELLSIZE = 100
arr1 = []
arr2 = []
for y in range(0, CELLSIZE):
arr1.append([])
arr2.append([])
for x in range(0, CELLSIZE):
arr1[-1].append([])
arr2[-1].append([])
cw = (xmax - xmin) / float(CELLSIZE)
ch = (ymax - ymin) / float(CELLSIZE)
for n in net2._nodes:
cx = (n._coord[0] - xmin) / cw
cy = (n._coord[1] - ymin) / ch
arr1[int(cy)][int(cx)].append(n)
for n in net1._nodes:
cx = (n._coord[0] - xmin) / cw
cy = (n._coord[1] - ymin) / ch
arr2[int(cy)][int(cx)].append(n)
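# Illustrative helper (hypothetical; the mapping code below does not call it): the
# CELLSIZE x CELLSIZE buckets built above map a coordinate to its grid cell so that
# candidate nodes could be looked up locally instead of scanning every node.
def _grid_cell(coord):
    return int((coord[0] - xmin) / cw), int((coord[1] - ymin) / ch)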
# map
nmap1to2 = {}
nmap2to1 = {}
nodes1 = net2._nodes
nodes2 = net1._nodes
highwayNodes2 = set()
highwaySinks2 = set()
highwaySources2 = set()
urbanNodes2 = set()
for n2 in nodes2:
noIncoming = 0
noOutgoing = 0
for e in n2._outgoing:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
highwayNodes2.add(n2)
if e.getSpeed() < 99:
noOutgoing = noOutgoing + 1
for e in n2._incoming:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
highwayNodes2.add(n2)
if e.getSpeed() < 99:
noIncoming = noIncoming + 1
if n2 in highwayNodes2:
if noOutgoing == 0:
highwaySinks2.add(n2)
if noIncoming == 0:
highwaySources2.add(n2)
else:
urbanNodes2.add(n2)
print("Found " + str(len(highwaySinks2)) + " highway sinks in net2")
cont = ""
for n in highwaySinks2:
cont = cont + n._id + ", "
print(cont)
cont = ""
print("Found " + str(len(highwaySources2)) + " highway sources in net2")
for n in highwaySources2:
cont = cont + n._id + ", "
print(cont)
fdd = open("dconns.con.xml", "w")
fdd.write("<connections>\n")
highwaySinks1 = set()
highwaySources1 = set()
origDistrictNodes = {}
nnn = {}
for n1 in nodes1:
if n1._id.find('-', 1) < 0:
continue
# if n1._id.find("38208387")<0:
# continue
un1 = None
for e in n1._outgoing:
un1 = e._to
for e in n1._incoming:
un1 = e._from
d = n1._id[:n1._id.find('-', 1)]
if d[0] == '-':
d = d[1:]
if d not in origDistrictNodes:
origDistrictNodes[d] = []
if options.verbose:
print("District: " + d)
isHighwayNode = False
isHighwaySink = False
isHighwaySource = False
noIncoming = 0
noOutgoing = 0
noInConns = 0
noOutConns = 0
for e in un1._outgoing:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
isHighwayNode = True
if e.getSpeed() < 99:
noOutgoing = noOutgoing + 1
if e.getSpeed() > 99:
noOutConns = noOutConns + 1
for e in un1._incoming:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
isHighwayNode = True
if e.getSpeed() < 99:
noIncoming = noIncoming + 1
if e.getSpeed() > 99:
noInConns = noInConns + 1
if options.verbose:
print("Check", un1._id, noOutgoing, noIncoming)
if isHighwayNode:
if noOutgoing == 0:
highwaySinks1.add(n1)
isHighwaySink = True
if noIncoming == 0:
highwaySources1.add(n1)
isHighwaySource = True
# the next is a hack for bad visum-networks
if noIncoming == 1 and noOutgoing == 1 and noInConns == 1 and noOutConns == 1:
highwaySinks1.add(n1)
isHighwaySink = True
highwaySources1.add(n1)
isHighwaySource = True
best = None
bestDist = -1
check = urbanNodes2
if n1 in highwaySinks1:
check = highwaySinks2
elif n1 in highwaySources1:
check = highwaySources2
elif isHighwayNode:
check = highwayNodes2
for n2 in check:
dist = computeDistance(un1, n2)
if bestDist == -1 or bestDist > dist:
best = n2
bestDist = dist
if best:
nnn[best] = n1
if d not in nmap1to2:
nmap1to2[d] = []
if best not in nmap1to2[d]:
nmap1to2[d].append(best)
if best not in nmap2to1:
nmap2to1[best] = []
if n1 not in nmap2to1[best]:
nmap2to1[best].append(n1)
if options.verbose:
print("a: " + d + "<->" + best._id)
if best not in origDistrictNodes[d]:
origDistrictNodes[d].append(best)
preBest = best
best = None
bestDist = -1
check = []
if n1 in highwaySinks1 or preBest in highwaySinks2:
check = highwaySources2
elif n1 in highwaySources1 or preBest in highwaySources2:
check = highwaySinks2
elif isHighwayNode:
check = highwayNodes2
for n2 in check:
dist = computeDistance(un1, n2)
if (bestDist == -1 or bestDist > dist) and n2 != preBest:
best = n2
bestDist = dist
if best:
nnn[best] = n1
if d not in nmap1to2:
nmap1to2[d] = []
if best not in nmap1to2[d]:
nmap1to2[d].append(best)
if best not in nmap2to1:
nmap2to1[best] = []
if n1 not in nmap2to1[best]:
nmap2to1[best].append(n1)
print("b: " + d + "<->" + best._id)
if best not in origDistrictNodes[d]:
origDistrictNodes[d].append(best)
if options.verbose:
print("Found " + str(len(highwaySinks1)) + " highway sinks in net1")
for n in highwaySinks1:
print(n._id)
print("Found " + str(len(highwaySources1)) + " highway sources in net1")
for n in highwaySources1:
print(n._id)
connectedNodesConnections = {}
for d in nmap1to2:
for n2 in nmap1to2[d]:
if n2 in connectedNodesConnections:
continue
n1i = net1.addNode("i" + n2._id, nnn[n2]._coord)
n1o = net1.addNode("o" + n2._id, nnn[n2]._coord)
haveIncoming = False
incomingLaneNo = 0
for e in n2._incoming:
if e._id[0] != "i" and e._id[0] != "o":
haveIncoming = True
incomingLaneNo = incomingLaneNo + e.getLaneNumber()
haveOutgoing = False
outgoingLaneNo = 0
for e in n2._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
haveOutgoing = True
outgoingLaneNo = outgoingLaneNo + e.getLaneNumber()
if haveIncoming:
e1 = net1.addEdge("o" + n2._id, n2._id, n1o._id, -2)
if haveOutgoing:
net1.addLane(e1, 20, 100.)
else:
for i in range(0, incomingLaneNo):
net1.addLane(e1, 20, 100.)
if len(n2._incoming) == 1:
fdd.write(' <connection from="' + n2._incoming[
0]._id + '" to="' + e1._id + '" lane="' + str(i) + ':' + str(i) + '"/>\n')
if haveOutgoing:
if options.verbose:
print("has outgoing")
e2 = net1.addEdge("i" + n2._id, n1i._id, n2._id, -2)
if haveIncoming:
net1.addLane(e2, 20, 100.)
else:
for i in range(0, outgoingLaneNo):
net1.addLane(e2, 20, 100.)
if len(n2._outgoing) == 1:
fdd.write(' <connection from="' + e2._id + '" to="' +
n2._outgoing[0]._id + '" lane="' + str(i) + ':' + str(i) + '"/>\n')
connectedNodesConnections[n2] = [n1i, n1o]
newDistricts = {}
districtSources = {}
districtSinks = {}
mappedDistrictNodes = {}
connNodes = {}
dRemap = {}
for d in nmap1to2:
newDistricts[d] = []
if len(nmap1to2[d]) == 1:
n = nmap1to2[d][0]
if n in dRemap:
districtSources[d] = districtSources[dRemap[n]]
districtSinks[d] = districtSinks[dRemap[n]]
newDistricts[d] = []
newDistricts[d].append(n._id)
continue
else:
dRemap[n] = d
[ni, no] = connectedNodesConnections[n]
if len(ni._outgoing) > 0:
districtSources[d] = ni._outgoing[0]._id
if len(no._incoming) > 0:
districtSinks[d] = no._incoming[0]._id
fdd.write(' <connection from="' + no._incoming[0]._id + '"/>\n')
else:
incomingLaneNoG = 0
outgoingLaneNoG = 0
for n in nmap1to2[d]:
for e in n._incoming:
if e._id[0] != "i" and e._id[0] != "o":
incomingLaneNoG = incomingLaneNoG + e.getLaneNumber()
for e in n._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
outgoingLaneNoG = outgoingLaneNoG + e.getLaneNumber()
p1 = [0, 0]
p11 = [0, 0]
p12 = [0, 0]
p2 = [0, 0]
for n in nmap1to2[d]:
p1[0] = p1[0] + n._coord[0]
p1[1] = p1[1] + n._coord[1]
p2[0] = p2[0] + nnn[n]._coord[0]
p2[1] = p2[1] + nnn[n]._coord[1]
p2[0] = (p1[0] + p2[0]) / float(len(origDistrictNodes[d]) * 2)
p2[1] = (p1[1] + p2[1]) / float(len(origDistrictNodes[d]) * 2)
dn2i = net1.addNode("cci" + d, p2)
dn2o = net1.addNode("cci" + d, p2)
p11[0] = p1[0] / float(len(origDistrictNodes[d]))
p11[1] = p1[1] / float(len(origDistrictNodes[d]))
dn1o = net1.addNode("co" + d, p11)
e1 = net1.addEdge("co" + d, dn1o._id, dn2o._id, -2)
for i in range(0, incomingLaneNoG):
net1.addLane(e1, 22, 100.)
districtSinks[d] = e1._id
p12[0] = p1[0] / float(len(origDistrictNodes[d]))
p12[1] = p1[1] / float(len(origDistrictNodes[d]))
dn1i = net1.addNode("ci" + d, p12)
e2 = net1.addEdge("ci" + d, dn2i._id, dn1i._id, -2)
for i in range(0, outgoingLaneNoG):
net1.addLane(e2, 21, 100.)
districtSources[d] = e2._id
runningOutLaneNumber = 0
runningInLaneNumber = 0
for n2 in nmap1to2[d]:
[ni, no] = connectedNodesConnections[n2]
print("In: " + ni._id + " " + str(len(ni._incoming)) +
" " + str(len(ni._outgoing)))
print("Out: " + no._id + " " + str(len(no._incoming)) +
" " + str(len(no._outgoing)))
if len(no._incoming) > 0:
incomingLaneNo = 0
for e in n2._incoming:
if e._id[0] != "i" and e._id[0] != "o":
incomingLaneNo = incomingLaneNo + e.getLaneNumber()
e1 = net1.addEdge("o" + d + "#" + n2._id, no._id, dn1o._id, -2)
for i in range(0, incomingLaneNo):
net1.addLane(e1, 19, 100.)
fdd.write(' <connection from="' + "o" + d + "#" + n2._id + '" to="' + dn1o._outgoing[
0]._id + '" lane="' + str(i) + ':' + str(runningOutLaneNumber) + '"/>\n')
runningOutLaneNumber = runningOutLaneNumber + 1
fdd.write(
' <connection from="' + dn1o._outgoing[0]._id + '"/>\n')
if incomingLaneNo == 0:
net1.addLane(e1, 19, 100.)
runningOutLaneNumber = runningOutLaneNumber + 1
if len(ni._outgoing) > 0:
outgoingLaneNo = 0
for e in n2._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
outgoingLaneNo = outgoingLaneNo + e.getLaneNumber()
e2 = net1.addEdge("i" + d + "#" + n2._id, dn1i._id, ni._id, -2)
for i in range(0, outgoingLaneNo):
net1.addLane(e2, 18, 100.)
fdd.write(' <connection from="' + dn1i._incoming[
0]._id + '" to="' + "i" + d + "#" + n2._id + '" lane="' + str(runningInLaneNumber) + ':' + str(i) + '"/>\n')
runningInLaneNumber = runningInLaneNumber + 1
if outgoingLaneNo == 0:
net1.addLane(e2, 18, 100.)
runningInLaneNumber = runningInLaneNumber + 1
fd = open("districts.xml", "w")
fd.write("<tazs>\n")
for d in newDistricts:
fd.write(' <taz id="' + d + '">\n')
if d in districtSources:
fd.write(
' <tazSource id="' + districtSources[d] + '" weight="1"/>\n')
if d in districtSinks:
fd.write(
' <tazSink id="' + districtSinks[d] + '" weight="1"/>\n')
fd.write(' </taz>\n')
fd.write("</tazs>\n")
fd.close()
def writeNode(fd, node):
fd.write(" <node id=\"" + node._id + "\" x=\"" +
str(node._coord[0]) + "\" y=\"" + str(node._coord[1]) + "\"/>\n")
def writeEdge(fd, edge, withGeom=True):
fd.write(" <edge id=\"" + edge._id + "\" fromNode=\"" +
edge._from._id + "\" toNode=\"" + edge._to._id)
fd.write("\" speed=\"" + str(edge._speed))
fd.write("\" priority=\"" + str(edge._priority))
if withGeom:
fd.write("\" spreadType=\"center")
fd.write("\" numLanes=\"" + str(len(edge._lanes)) + "\"")
shape = edge.getShape()
if withGeom:
fd.write(" shape=\"")
for i, c in enumerate(shape):
if i != 0:
fd.write(" ")
fd.write(str(c[0]) + "," + str(c[1]))
fd.write("\"")
fd.write("/>\n")
def writeNodes(net):
fd = open("nodes.xml", "w")
fd.write("<nodes>\n")
for node in net._nodes:
writeNode(fd, node)
fd.write("</nodes>\n")
fd.close()
def writeEdges(net):
fd = open("edges.xml", "w")
fd.write("<edges>\n")
for edge in net._edges:
if edge._id.find("#") > 0 or edge._id.find("c") >= 0 or edge._id.find("i") >= 0:
writeEdge(fd, edge, False)
else:
writeEdge(fd, edge)
fd.write("</edges>\n")
fd.close()
fdd.write("</connections>\n")
writeNodes(net1)
writeEdges(net1)
| gpl-3.0 |
tomlof/scikit-learn | sklearn/model_selection/_validation.py | 6 | 38471 | """
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.fixes import astype
from ..utils.validation import _is_arraylike, _num_samples
from ..utils.metaestimators import _safe_split
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import check_cv
from ..preprocessing import LabelEncoder
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
'learning_curve', 'validation_curve']
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv.split(X, y, groups))
return np.array(scores)[:, 0]
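# Illustrative sketch (comments only, not part of the public API): ignoring joblib
# dispatch, the parallel call above is equivalent to the serial loop
#
#     scores = [_fit_and_score(clone(estimator), X, y, scorer, train, test,
#                              verbose, None, fit_params)[0]
#               for train, test in cv.split(X, y, groups)]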
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_time = time.time() - start_time
test_score = _score(estimator, X_test, y_test, scorer)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
total_time = score_time + fit_time
end_msg = "%s, total=%s" % (msg, logger.short_format_time(total_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score, test_score] if return_train_score else [test_score]
if return_n_test_samples:
ret.append(_num_samples(X_test))
if return_times:
ret.extend([fit_time, score_time])
if return_parameters:
ret.append(parameters)
return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_predict(estimator, X, y=None, groups=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
method='predict'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : string, optional, default: 'predict'
Invokes the passed method name of the passed estimator. For
method='predict_proba', the columns correspond to the classes
in sorted order.
Returns
-------
predictions : ndarray
This is the result of calling ``method``
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Ensure the estimator has implemented the passed decision function
if not callable(getattr(estimator, method)):
raise AttributeError('{} not implemented in estimator'
.format(method))
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
le = LabelEncoder()
y = le.fit_transform(y)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
prediction_blocks = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in cv.split(X, y, groups))
# Concatenate the predictions
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i
for _, indices_i in prediction_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
else:
predictions = np.concatenate(predictions)
return predictions[inv_test_indices]
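# --- Illustrative sketch, not part of the original module --------------------
# A hedged usage example of ``cross_val_predict`` with ``method='predict_proba'``:
# the returned array has one column per class, in sorted class order. The
# dataset and estimator below are assumptions made only for this sketch.
def _demo_cross_val_predict_proba():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    X, y = load_iris(return_X_y=True)
    # out-of-fold class probabilities, shape (n_samples, n_classes)
    proba = cross_val_predict(LogisticRegression(), X, y, cv=5,
                              method='predict_proba')
    return proba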
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : string
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
n_classes = len(set(y))
predictions_ = np.zeros((X_test.shape[0], n_classes))
if method == 'decision_function' and len(estimator.classes_) == 2:
predictions_[:, estimator.classes_[-1]] = predictions
else:
predictions_[:, estimator.classes_] = predictions
predictions = predictions_
return predictions, test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(indices) is np.arange(n)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, dtype=bool)
hit[indices] = True
if not np.all(hit):
return False
return True
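# --- Illustrative sketch, not part of the original module --------------------
# Hedged example of what ``_check_is_permutation`` accepts and rejects: a
# shuffled ``np.arange(n)`` passes, while indices that miss samples (i.e. a
# non-exhaustive partition) do not.
def _demo_check_is_permutation():
    rng = np.random.RandomState(0)
    is_ok = _check_is_permutation(rng.permutation(10), 10)          # True
    is_not_ok = _check_is_permutation(np.array([0, 1, 2, 4]), 10)   # False
    return is_ok, is_not_ok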
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
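# --- Illustrative sketch, not part of the original module --------------------
# Hedged example of the fit-parameter indexing rule above: array-like values
# with one entry per sample (e.g. ``sample_weight``) are sliced down to the
# training indices, while scalars are passed through unchanged.
def _demo_index_param_value():
    X = np.zeros((4, 2))
    train = np.array([0, 2])
    sliced = _index_param_value(X, np.array([1., 2., 3., 4.]), train)  # [1., 3.]
    passed_through = _index_param_value(X, 5, train)                   # 5
    return sliced, passed_through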
def permutation_test_score(estimator, X, y, groups=None, cv=None,
n_permutations=100, n_jobs=1, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
Where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, groups, random_state),
groups, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
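# --- Illustrative sketch, not part of the original module --------------------
# Hedged usage example for ``permutation_test_score``: a small p-value suggests
# that the observed score is unlikely under the null hypothesis of no
# dependency between features and labels. The dataset, estimator and
# n_permutations below are assumptions made only for this sketch.
def _demo_permutation_test_score():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    X, y = load_iris(return_X_y=True)
    score, perm_scores, pvalue = permutation_test_score(
        SVC(kernel='linear'), X, y, cv=5, n_permutations=30, random_state=0)
    return score, perm_scores, pvalue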
def _permutation_test_score(estimator, X, y, groups, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, groups):
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
estimator.fit(X_train, y_train)
avg_score.append(scorer(estimator, X_test, y_test))
return np.mean(avg_score)
def _shuffle(y, groups, random_state):
"""Return a shuffled copy of y eventually shuffle among same groups."""
if groups is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(groups))
for group in np.unique(groups):
this_mask = (groups == group)
indices[this_mask] = random_state.permutation(indices[this_mask])
return safe_indexing(y, indices)
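# --- Illustrative sketch, not part of the original module --------------------
# Hedged example of the group-constrained shuffle above: with ``groups`` given,
# labels are only permuted among samples sharing a group identifier, so each
# group keeps its own multiset of ``y`` values.
def _demo_shuffle_within_groups():
    rng = check_random_state(0)
    y = np.array([0, 1, 2, 3, 4, 5])
    groups = np.array([0, 0, 0, 1, 1, 1])
    # values {0, 1, 2} stay in group 0 and {3, 4, 5} stay in group 1
    return _shuffle(y, groups, rng)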
def learning_curve(estimator, X, y, groups=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
exploit_incremental_learning=False, n_jobs=1,
pre_dispatch="all", verbose=0, shuffle=False,
random_state=None):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be large enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
shuffle : boolean, optional
Whether to shuffle training data before taking prefixes of it
based on``train_sizes``.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
    Returns
    -------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Store it as list as we will be iterating over the list multiple times
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if shuffle:
rng = check_random_state(random_state)
cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv_iter)
else:
train_test_proportions = []
for train, test in cv_iter:
for n_train_samples in train_sizes_abs:
train_test_proportions.append((train[:n_train_samples], test))
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in train_test_proportions)
out = np.array(out)
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
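# --- Illustrative sketch, not part of the original module --------------------
# Hedged usage example for ``learning_curve``: averaging the per-fold scores
# over axis 1 gives one training curve and one validation curve per training
# set size. The dataset and estimator below are assumptions for this sketch.
def _demo_learning_curve():
    from sklearn.datasets import load_iris
    from sklearn.naive_bayes import GaussianNB
    X, y = load_iris(return_X_y=True)
    sizes, train_scores, test_scores = learning_curve(
        GaussianNB(), X, y, cv=5, train_sizes=np.linspace(0.1, 1.0, 5))
    return sizes, train_scores.mean(axis=1), test_scores.mean(axis=1)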
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, groups=None,
cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
# NOTE do not change order of iteration to allow one time cv splitters
for train, test in cv.split(X, y, groups) for v in param_range)
out = np.asarray(out)
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
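# --- Illustrative sketch, not part of the original module --------------------
# Hedged usage example for ``validation_curve``: vary one hyperparameter (here
# the ``gamma`` of an SVC, chosen only for illustration) and compare training
# and validation scores to spot under- and overfitting.
def _demo_validation_curve():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    X, y = load_iris(return_X_y=True)
    param_range = np.logspace(-6, -1, 5)
    train_scores, test_scores = validation_curve(
        SVC(), X, y, param_name='gamma', param_range=param_range, cv=5)
    return train_scores.mean(axis=1), test_scores.mean(axis=1)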
| bsd-3-clause |
Nyker510/scikit-learn | sklearn/metrics/ranking.py | 75 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, tresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds := len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
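# --- Illustrative sketch, not part of the original module --------------------
# Hedged example of the cumulative counts returned by ``_binary_clf_curve``:
# thresholds are the distinct scores in decreasing order, and fps/tps count the
# negative/positive samples scored at or above each threshold.
def _demo_binary_clf_curve():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
    # thresholds -> [0.8, 0.4, 0.35, 0.1], tps -> [1, 1, 2, 2], fps -> [0, 1, 1, 2]
    return fps, tps, thresholds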
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
    Ties in ``y_score`` are broken by giving the maximal rank that would have
    been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
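# --- Illustrative sketch, not part of the original module --------------------
# Hedged example for ``coverage_error``: for each sample, count how many of the
# top-ranked labels must be taken to cover every true label, then average.
def _demo_coverage_error():
    y_true = np.array([[1, 0, 0], [0, 0, 1]])
    y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
    # sample 1: its true label ranks 2nd -> 2; sample 2: ranks 3rd -> 3
    return coverage_error(y_true, y_score)  # (2 + 3) / 2 = 2.5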
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered as correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
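# --- Illustrative sketch, not part of the original module --------------------
# Hedged example for ``label_ranking_loss``: the loss is the fraction of
# (true label, false label) pairs ordered incorrectly by ``y_score``, averaged
# over samples; 0.0 means every true label outranks every false label.
def _demo_label_ranking_loss():
    y_true = np.array([[1, 0, 0], [0, 0, 1]])
    y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
    return label_ranking_loss(y_true, y_score)  # 0.75 for this toy input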
| bsd-3-clause |
oselivanov/matplotlib_iterm2 | matplotlib_iterm2/backend_iterm2.py | 1 | 1787 | """iTerm2 experimental backend for matplotlib.
Based on iTerm2 nightly build feature - displaying images in terminal.
http://iterm2.com/images.html#/section/home
Example:
import matplotlib
matplotlib.use('xxx')
from pylab import *
plot([1,2,3])
show()
"""
__author__ = 'Oleg Selivanov <[email protected]>'
import os
import subprocess
import tempfile
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import FigureManagerBase
from matplotlib.figure import Figure
from PIL import Image
# TODO(oleg): Show better message if PIL/Pillow is not installed.
# TODO(oleg): Check if imgcat script exists.
def show():
for manager in Gcf.get_all_fig_managers():
manager.show()
# TODO(oleg): Check if it's okay to destroy manager here.
Gcf.destroy(manager.num)
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasAgg(thisFig)
manager = FigureManagerTemplate(canvas, num)
return manager
class FigureManagerTemplate(FigureManagerBase):
def show(self):
canvas = self.canvas
canvas.draw()
if matplotlib.__version__ < '1.2':
buf = canvas.buffer_rgba(0, 0)
else:
buf = canvas.buffer_rgba()
render = canvas.get_renderer()
w, h = int(render.width), int(render.height)
im = Image.frombuffer('RGBA', (w, h), buf, 'raw', 'RGBA', 0, 1)
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:
im.save(f.name)
subprocess.call(['imgcat', f.name])
os.unlink(f.name)
FigureManager = FigureManagerBase
| mit |
Fokko/incubator-airflow | airflow/hooks/base_hook.py | 1 | 4313 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base class for all hooks"""
import os
import random
from typing import Iterable
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(LoggingMixin):
"""
Abstract base class for hooks, hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook, PigHook return
object that can handle the connection and interaction to specific
instances of these systems, and expose consistent methods to interact
with them.
"""
@classmethod
@provide_session
def _get_connections_from_db(cls, conn_id, session=None):
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
session.expunge_all()
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
return db
@classmethod
def _get_connection_from_env(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
return conn
@classmethod
def get_connections(cls, conn_id: str) -> Iterable[Connection]:
"""
Get all connections as an iterable.
:param conn_id: connection id
:return: array of connections
"""
conn = cls._get_connection_from_env(conn_id)
if conn:
conns = [conn]
else:
conns = cls._get_connections_from_db(conn_id)
return conns
@classmethod
def get_connection(cls, conn_id: str) -> Connection:
"""
Get random connection selected from all connections configured with this connection id.
:param conn_id: connection id
:return: connection
"""
conn = random.choice(list(cls.get_connections(conn_id)))
if conn.host:
log = LoggingMixin().log
log.info("Using connection to: %s", conn.debug_info())
return conn
@classmethod
def get_hook(cls, conn_id: str) -> "BaseHook":
"""
Returns default hook for this connection id.
:param conn_id: connection id
:return: default hook for this connection
"""
# TODO: set method return type to BaseHook class when on 3.7+.
# See https://stackoverflow.com/a/33533514/3066428
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
"""Returns connection for the hook."""
raise NotImplementedError()
def get_records(self, sql):
"""Returns records for the sql query (for hooks that support SQL)."""
# TODO: move it out from the base hook. It belongs to some common SQL hook most likely
raise NotImplementedError()
def get_pandas_df(self, sql):
"""Returns pandas dataframe for the sql query (for hooks that support SQL)."""
# TODO: move it out from the base hook. It belongs to some common SQL hook most likely
raise NotImplementedError()
def run(self, sql):
"""Runs SQL query (for hooks that support SQL)."""
# TODO: move it out from the base hook. It belongs to some common SQL hook most likely
raise NotImplementedError()
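# --- Illustrative sketch, not part of the original module --------------------
# A hedged example of a minimal concrete hook: subclasses typically look up
# their Connection via ``get_connection`` and build a client or URL from it.
# The connection id and the URL layout below are assumptions for this sketch.
class _DemoHttpHook(BaseHook):
    """Toy hook that turns an Airflow Connection into a base URL string."""
    def __init__(self, conn_id: str = 'demo_http_default') -> None:
        super().__init__()
        self.conn_id = conn_id
    def get_conn(self) -> str:
        conn = self.get_connection(self.conn_id)
        schema = conn.schema or 'http'
        port = ':{}'.format(conn.port) if conn.port else ''
        return '{}://{}{}'.format(schema, conn.host, port)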
| apache-2.0 |
jhamman/xarray | xarray/core/coordinates.py | 1 | 13197 | from contextlib import contextmanager
from typing import (
TYPE_CHECKING,
Any,
Dict,
Hashable,
Iterator,
Mapping,
Sequence,
Set,
Tuple,
Union,
cast,
)
import pandas as pd
from . import formatting, indexing
from .indexes import Indexes
from .merge import merge_coordinates_without_align, merge_coords
from .utils import Frozen, ReprObject, either_dict_or_kwargs
from .variable import Variable
if TYPE_CHECKING:
from .dataarray import DataArray
from .dataset import Dataset
# Used as the key corresponding to a DataArray's variable when converting
# arbitrary DataArray objects to datasets
_THIS_ARRAY = ReprObject("<this-array>")
class Coordinates(Mapping[Hashable, "DataArray"]):
__slots__ = ()
def __getitem__(self, key: Hashable) -> "DataArray":
raise NotImplementedError()
def __setitem__(self, key: Hashable, value: Any) -> None:
self.update({key: value})
@property
def _names(self) -> Set[Hashable]:
raise NotImplementedError()
@property
def dims(self) -> Union[Mapping[Hashable, int], Tuple[Hashable, ...]]:
raise NotImplementedError()
@property
def indexes(self) -> Indexes:
return self._data.indexes # type: ignore
@property
def variables(self):
raise NotImplementedError()
def _update_coords(self, coords, indexes):
raise NotImplementedError()
def __iter__(self) -> Iterator["Hashable"]:
# needs to be in the same order as the dataset variables
for k in self.variables:
if k in self._names:
yield k
def __len__(self) -> int:
return len(self._names)
def __contains__(self, key: Hashable) -> bool:
return key in self._names
def __repr__(self) -> str:
return formatting.coords_repr(self)
def to_dataset(self) -> "Dataset":
raise NotImplementedError()
def to_index(self, ordered_dims: Sequence[Hashable] = None) -> pd.Index:
"""Convert all index coordinates into a :py:class:`pandas.Index`.
Parameters
----------
ordered_dims : sequence of hashable, optional
Possibly reordered version of this object's dimensions indicating
the order in which dimensions should appear on the result.
Returns
-------
pandas.Index
Index subclass corresponding to the outer-product of all dimension
coordinates. This will be a MultiIndex if this object is has more
than more dimension.
"""
if ordered_dims is None:
ordered_dims = list(self.dims)
elif set(ordered_dims) != set(self.dims):
raise ValueError(
"ordered_dims must match dims, but does not: "
"{} vs {}".format(ordered_dims, self.dims)
)
if len(ordered_dims) == 0:
raise ValueError("no valid index for a 0-dimensional object")
elif len(ordered_dims) == 1:
(dim,) = ordered_dims
return self._data.get_index(dim) # type: ignore
else:
indexes = [self._data.get_index(k) for k in ordered_dims] # type: ignore
names = list(ordered_dims)
return pd.MultiIndex.from_product(indexes, names=names)
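    # Hedged usage sketch for ``to_index`` (not part of the library source; assumes
    # ``import xarray as xr`` and ``import numpy as np``):
    #   da = xr.DataArray(np.zeros((2, 3)),
    #                     coords={"x": [1, 2], "y": [10, 20, 30]}, dims=("x", "y"))
    #   da.coords.to_index()  # -> pandas.MultiIndex over the "x" x "y" product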
def update(self, other: Mapping[Hashable, Any]) -> None:
other_vars = getattr(other, "variables", other)
coords, indexes = merge_coords(
[self.variables, other_vars], priority_arg=1, indexes=self.indexes
)
self._update_coords(coords, indexes)
def _merge_raw(self, other):
"""For use with binary arithmetic."""
if other is None:
variables = dict(self.variables)
indexes = dict(self.indexes)
else:
variables, indexes = merge_coordinates_without_align([self, other])
return variables, indexes
@contextmanager
def _merge_inplace(self, other):
"""For use with in-place binary arithmetic."""
if other is None:
yield
else:
# don't include indexes in prioritized, because we didn't align
# first and we want indexes to be checked
prioritized = {
k: (v, None) for k, v in self.variables.items() if k not in self.indexes
}
variables, indexes = merge_coordinates_without_align(
[self, other], prioritized
)
yield
self._update_coords(variables, indexes)
def merge(self, other: "Coordinates") -> "Dataset":
"""Merge two sets of coordinates to create a new Dataset
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : DatasetCoordinates or DataArrayCoordinates
The coordinates from another dataset or data array.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates.
"""
from .dataset import Dataset
if other is None:
return self.to_dataset()
if not isinstance(other, Coordinates):
other = Dataset(coords=other).coords
coords, indexes = merge_coordinates_without_align([self, other])
coord_names = set(coords)
merged = Dataset._construct_direct(
variables=coords, coord_names=coord_names, indexes=indexes
)
return merged
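    # Hedged usage sketch for ``merge`` (not part of the library source; assumes
    # ``import xarray as xr``):
    #   left = xr.Dataset(coords={"x": [0, 1], "a": 1.0})
    #   right = xr.Dataset(coords={"x": [0, 1], "b": 2.0})
    #   left.coords.merge(right.coords)  # -> Dataset with coordinates x, a and b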
class DatasetCoordinates(Coordinates):
"""Dictionary like container for Dataset coordinates.
Essentially an immutable dictionary with keys given by the array's
dimensions and the values given by the corresponding xarray.Coordinate
objects.
"""
__slots__ = ("_data",)
def __init__(self, dataset: "Dataset"):
self._data = dataset
@property
def _names(self) -> Set[Hashable]:
return self._data._coord_names
@property
def dims(self) -> Mapping[Hashable, int]:
return self._data.dims
@property
def variables(self) -> Mapping[Hashable, Variable]:
return Frozen(
{k: v for k, v in self._data.variables.items() if k in self._names}
)
def __getitem__(self, key: Hashable) -> "DataArray":
if key in self._data.data_vars:
raise KeyError(key)
return cast("DataArray", self._data[key])
def to_dataset(self) -> "Dataset":
"""Convert these coordinates into a new Dataset
"""
return self._data._copy_listed(self._names)
def _update_coords(
self, coords: Dict[Hashable, Variable], indexes: Mapping[Hashable, pd.Index]
) -> None:
from .dataset import calculate_dimensions
variables = self._data._variables.copy()
variables.update(coords)
# check for inconsistent state *before* modifying anything in-place
dims = calculate_dimensions(variables)
new_coord_names = set(coords)
for dim, size in dims.items():
if dim in variables:
new_coord_names.add(dim)
self._data._variables = variables
self._data._coord_names.update(new_coord_names)
self._data._dims = dims
# TODO(shoyer): once ._indexes is always populated by a dict, modify
# it to update inplace instead.
original_indexes = dict(self._data.indexes)
original_indexes.update(indexes)
self._data._indexes = original_indexes
def __delitem__(self, key: Hashable) -> None:
if key in self:
del self._data[key]
else:
raise KeyError(key)
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return [
key
for key in self._data._ipython_key_completions_()
if key not in self._data.data_vars
]
class DataArrayCoordinates(Coordinates):
"""Dictionary like container for DataArray coordinates.
Essentially a dict with keys given by the array's
dimensions and the values given by corresponding DataArray objects.
"""
__slots__ = ("_data",)
def __init__(self, dataarray: "DataArray"):
self._data = dataarray
@property
def dims(self) -> Tuple[Hashable, ...]:
return self._data.dims
@property
def _names(self) -> Set[Hashable]:
return set(self._data._coords)
def __getitem__(self, key: Hashable) -> "DataArray":
return self._data._getitem_coord(key)
def _update_coords(
self, coords: Dict[Hashable, Variable], indexes: Mapping[Hashable, pd.Index]
) -> None:
from .dataset import calculate_dimensions
coords_plus_data = coords.copy()
coords_plus_data[_THIS_ARRAY] = self._data.variable
dims = calculate_dimensions(coords_plus_data)
if not set(dims) <= set(self.dims):
raise ValueError(
"cannot add coordinates with new dimensions to " "a DataArray"
)
self._data._coords = coords
# TODO(shoyer): once ._indexes is always populated by a dict, modify
# it to update inplace instead.
original_indexes = dict(self._data.indexes)
original_indexes.update(indexes)
self._data._indexes = original_indexes
@property
def variables(self):
return Frozen(self._data._coords)
def to_dataset(self) -> "Dataset":
from .dataset import Dataset
coords = {k: v.copy(deep=False) for k, v in self._data._coords.items()}
return Dataset._from_vars_and_coord_names(coords, set(coords))
def __delitem__(self, key: Hashable) -> None:
del self._data._coords[key]
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return self._data._ipython_key_completions_()
class LevelCoordinatesSource(Mapping[Hashable, Any]):
"""Iterator for MultiIndex level coordinates.
Used for attribute style lookup with AttrAccessMixin. Not returned directly
by any public methods.
"""
__slots__ = ("_data",)
def __init__(self, data_object: "Union[DataArray, Dataset]"):
self._data = data_object
def __getitem__(self, key):
# not necessary -- everything here can already be found in coords.
raise KeyError()
def __iter__(self) -> Iterator[Hashable]:
return iter(self._data._level_coords)
def __len__(self) -> int:
return len(self._data._level_coords)
def assert_coordinate_consistent(
obj: Union["DataArray", "Dataset"], coords: Mapping[Hashable, Variable]
) -> None:
"""Make sure the dimension coordinate of obj is consistent with coords.
obj: DataArray or Dataset
coords: Dict-like of variables
"""
for k in obj.dims:
# make sure there are no conflict in dimension coordinates
if k in coords and k in obj.coords:
if not coords[k].equals(obj[k].variable):
raise IndexError(
"dimension coordinate {!r} conflicts between "
"indexed and indexing objects:\n{}\nvs.\n{}".format(
k, obj[k], coords[k]
)
)
def remap_label_indexers(
obj: Union["DataArray", "Dataset"],
indexers: Mapping[Hashable, Any] = None,
method: str = None,
tolerance=None,
**indexers_kwargs: Any,
) -> Tuple[dict, dict]: # TODO more precise return type after annotations in indexing
"""Remap indexers from obj.coords.
If indexer is an instance of DataArray and it has coordinate, then this coordinate
will be attached to pos_indexers.
Returns
-------
pos_indexers: Same type of indexers.
np.ndarray or Variable or DataArray
new_indexes: mapping of new dimensional-coordinate.
"""
from .dataarray import DataArray
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "remap_label_indexers")
v_indexers = {
k: v.variable.data if isinstance(v, DataArray) else v
for k, v in indexers.items()
}
pos_indexers, new_indexes = indexing.remap_label_indexers(
obj, v_indexers, method=method, tolerance=tolerance
)
# attach indexer's coordinate to pos_indexers
for k, v in indexers.items():
if isinstance(v, Variable):
pos_indexers[k] = Variable(v.dims, pos_indexers[k])
elif isinstance(v, DataArray):
# drop coordinates found in indexers since .sel() already
# ensures alignments
coords = {k: var for k, var in v._coords.items() if k not in indexers}
pos_indexers[k] = DataArray(pos_indexers[k], coords=coords, dims=v.dims)
return pos_indexers, new_indexes
| apache-2.0 |
MartinDelzant/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
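# --- Hedged usage sketch (not part of the test suite): the minimal fit/score
# --- pattern exercised by the tests above, using only the imports at the top of
# --- this file; the function name is illustrative.
def _example_spectral_coclustering():
    X, rows, cols = make_biclusters((30, 30), 3, noise=0.5, random_state=0)
    X -= X.min()  # the model expects nonnegative data, as in the tests above
    model = SpectralCoclustering(n_clusters=3, random_state=0)
    model.fit(X)
    # a consensus score of 1 means the planted biclusters were fully recovered
    return consensus_score(model.biclusters_, (rows, cols))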
| bsd-3-clause |
kernc/scikit-learn | examples/model_selection/randomized_search.py | 44 | 3253 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
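# --- Hedged variant (assumption: a scikit-learn version whose search estimators
# --- expose `cv_results_` instead of `grid_scores_`): the same report written
# --- against the newer attribute; not part of the original example.
def report_cv_results(cv_results, n_top=3):
    for rank in range(1, n_top + 1):
        for i in np.flatnonzero(cv_results['rank_test_score'] == rank):
            print("Model with rank: {0}".format(rank))
            print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
                cv_results['mean_test_score'][i],
                cv_results['std_test_score'][i]))
            print("Parameters: {0}".format(cv_results['params'][i]))
            print("")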
| bsd-3-clause |
chichilalescu/bfps | tests/test_filters.py | 1 | 7175 | #######################################################################
# #
# Copyright 2015 Max Planck Institute #
# for Dynamics and Self-Organization #
# #
# This file is part of bfps. #
# #
# bfps is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, #
# or (at your option) any later version. #
# #
# bfps is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with bfps. If not, see <http://www.gnu.org/licenses/> #
# #
# Contact: [email protected] #
# #
#######################################################################
# relevant for results of "bfps TEST filter_test"
import h5py
import numpy as np
import matplotlib.pyplot as plt
def phi_b(
x, ell):
phi = (6. / (np.pi * ell**3)) * np.ones(x.shape, x.dtype)
bindices = np.where(np.abs(x) > ell/2)
phi[bindices] = 0
return phi
def hat_phi_b(
k, ell,
prefactor = 1.0):
arg = k * ell / 2
phi = (3. / arg**3) * (np.sin(arg) - arg*np.cos(arg))
return phi
def phi_s(
x,
ell,
prefactor = 2*np.pi):
kc = prefactor / ell
arg = kc*x
phi = (np.sin(arg) - arg*np.cos(arg)) / (2 * x**3 * np.pi**2)
return phi
def hat_phi_s(
k, ell, prefactor = 2.8):
kc = prefactor / ell
bindices = np.where(np.abs(k) > kc)
phi = np.ones(k.shape, k.dtype)
phi[bindices] = 0
return phi
def phi_g(
x,
ell,
prefactor = 1):
sigma = prefactor * ell
phi = np.exp(- 0.5 * (x / sigma)**2) / (sigma**3 * (2*np.pi)**1.5)
return phi
def hat_phi_g(
k, ell,
prefactor = 0.23):
sigma = prefactor * ell
return np.exp(-0.5 * (k * sigma)**2)
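# --- Hedged sanity check (not part of the original test): phi_b above should
# --- integrate to one over R^3; a quick numerical check on a uniform grid,
# --- assuming the ball of diameter ell fits inside the [-1, 1)^3 box.
def check_ball_filter_normalization(ell = 0.5, N = 64):
    x = np.linspace(-1., 1., N, endpoint = False)
    dx = x[1] - x[0]
    X, Y, Z = np.meshgrid(x, x, x, indexing = 'ij')
    r = np.sqrt(X**2 + Y**2 + Z**2)
    return np.sum(phi_b(r, ell)) * dx**3   # should be close to 1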
def filter_comparison(
dd = None,
base_name = 'filter_test_',
dim = 0):
b = dd.df['ball/real/{0}'.format(dim)].value
g = dd.df['Gauss/real/{0}'.format(dim)].value
s = dd.df['sharp_Fourier_sphere/real/{0}'.format(dim)].value
d3V = dd.grid_spacing['x']*dd.grid_spacing['y']*dd.grid_spacing['z']
print(np.sum(b)*d3V)
print(np.sum(g)*d3V)
print(np.sum(s)*d3V)
levels = np.linspace(
min(b.min(), g.min(), s.min()),
max(b.max(), g.max(), s.max()),
64)
#levels = None
f = plt.figure(figsize = (12, 6))
a = f.add_subplot(131)
v = np.roll(b[..., 0], b.shape[0]//2, axis = 0)
v = np.roll(v, b.shape[0]//2, axis = 1)
cc = a.contourf(v, levels = levels)
f.colorbar(cc, ax = a, orientation = 'horizontal')
a = f.add_subplot(132)
v = np.roll(g[..., 0], g.shape[0]//2, axis = 0)
v = np.roll(v, g.shape[0]//2, axis = 1)
cc = a.contourf(v, levels = levels)
f.colorbar(cc, ax = a, orientation = 'horizontal')
a = f.add_subplot(133)
v = np.roll(s[..., 0], s.shape[0]//2, axis = 0)
v = np.roll(v, s.shape[0]//2, axis = 1)
cc = a.contourf(v, levels = levels)
f.colorbar(cc, ax = a, orientation = 'horizontal')
f.tight_layout()
f.savefig(base_name + '2D.pdf')
f = plt.figure(figsize = (6, 5))
a = f.add_subplot(111)
zz = dd.get_coordinate('z')
# ball filter
a.plot(
zz,
b[:, 0, 0],
label = '$\\phi^b$ numeric',
color = 'red',
dashes = (4, 4))
a.plot(
zz,
phi_b(zz, dd.parameters['filter_length']),
label = '$\\phi^b$ exact',
color = 'red',
dashes = (1, 1))
a.plot(
zz,
g[:, 0, 0],
label = '$\\phi^g$',
color = 'magenta',
dashes = (4, 4))
a.plot(
zz,
phi_g(zz, dd.parameters['filter_length'], prefactor = 0.5),
label = '$\\phi^g$ exact',
color = 'magenta',
dashes = (1, 1))
a.plot(
zz,
s[:, 0, 0],
label = '$\\phi^s$ numeric',
color = 'blue',
dashes = (4, 4))
a.plot(
zz,
phi_s(zz, dd.parameters['filter_length'], prefactor = 2*np.pi),
label = '$\\phi^s$ exact',
color = 'blue',
dashes = (1, 1))
a.set_xlim(0, np.pi)
a.legend(loc = 'best')
f.tight_layout()
f.savefig(base_name + '1D.pdf')
return None
def resolution_comparison(
dlist = None,
base_name = 'normalization_test_',
dim = 0,
filter_type = 'Gauss'):
f = plt.figure(figsize = (6, 5))
a = f.add_subplot(111)
for dd in dlist:
s0 = dd.df[filter_type + '/real/{0}'.format(dim)].value
a.plot(dd.get_coordinate('z'),
s0[:, 0, 0],
label = '{0}'.format(dd.simname))
a.legend(loc = 'best')
f.tight_layout()
f.savefig(base_name + filter_type + '_1D.pdf')
return None
class sim_data:
def __init__(
self,
simname = 'bla'):
self.simname = simname
self.df = h5py.File(simname + '_fields.h5', 'r')
pfile = h5py.File(simname + '.h5', 'r')
self.parameters = {}
for kk in pfile['parameters'].keys():
self.parameters[kk] = pfile['parameters/' + kk].value
self.grid_spacing = {}
for kk in ['x', 'y', 'z']:
self.grid_spacing[kk] = 2*np.pi / (self.parameters['dk' + kk] * self.parameters['n' + kk])
return None
def get_coordinate(
self,
c = 'x'):
return np.linspace(
0, 2*np.pi / self.parameters['dk' + c],
self.parameters['n' + c],
endpoint = False)
def main():
#d32 = sim_data(simname = 'N32')
#d48 = sim_data(simname = 'N48')
#d64 = sim_data(simname = 'N64')
#d128 = sim_data(simname = 'N128')
#for ff in ['ball',
# 'sharp_Fourier_sphere',
# 'Gauss']:
# resolution_comparison(
# [d32, d48, d64, d128],
# dim = 2,
# filter_type = ff)
dd = sim_data(simname = 'test')
filter_comparison(
dd,
dim = 0)
return None
if __name__ == '__main__':
main()
| gpl-3.0 |
ryanjmccall/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_macosx.py | 69 | 15397 | from __future__ import division
import os
import numpy
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2
from matplotlib.cbook import maxdict
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.mathtext import MathTextParser
from matplotlib.colors import colorConverter
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
def show():
"""Show all the figures and enter the Cocoa mainloop.
This function will not return until all windows are closed or
the interpreter exits."""
# Having a Python-level function "show" wrapping the built-in
# function "show" in the _macosx extension module allows us to
# to add attributes to "show". This is something ipython does.
_macosx.show()
class RendererMac(RendererBase):
"""
    The renderer handles drawing/rendering operations. Most of the renderer's
    methods forward the command to the renderer's graphics context. The
renderer does not wrap a C object and is written in pure Python.
"""
texd = maxdict(50) # a cache of tex image rasters
def __init__(self, dpi, width, height):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self.gc = GraphicsContextMac()
self.mathtext_parser = MathTextParser('MacOSX')
def set_width_height (self, width, height):
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_path(path, transform, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_markers(marker_path, marker_trans, path, trans, rgbFace)
def draw_path_collection(self, *args):
gc = self.gc
args = args[:13]
gc.draw_path_collection(*args)
def draw_quad_mesh(self, *args):
gc = self.gc
gc.draw_quad_mesh(*args)
def new_gc(self):
self.gc.reset()
return self.gc
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
im.flipud_out()
nrows, ncols, data = im.as_rgba_str()
self.gc.draw_image(x, y, nrows, ncols, data, bbox, clippath, clippath_trans)
im.flipud_out()
def draw_tex(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
        im = self.texd.get(key) # Not sure what this does; just copied from backend_agg.py
        if im is None:
            Z = texmanager.get_grey(s, size, self.dpi)
            Z = numpy.array(255.0 - Z * 255.0, numpy.uint8)
            self.texd[key] = Z
        else:
            Z = im  # reuse the cached raster; previously Z was unbound on this path
        gc.draw_mathtext(x, y, angle, Z)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
size = prop.get_size_in_points()
ox, oy, width, height, descent, image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
gc.draw_mathtext(x, y, angle, 255 - image.as_array())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
gc.draw_text(x, y, unicode(s), family, size, weight, style, angle)
def get_text_width_height_descent(self, s, prop, ismath):
if ismath=='TeX':
# TODO: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: handle descent; This is based on backend_agg.py
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
return self.gc.get_text_width_height_descent(unicode(s), family, size, weight, style)
def flipy(self):
return False
def points_to_pixels(self, points):
return points/72.0 * self.dpi
def option_image_nocomposite(self):
return True
class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase):
"""
The GraphicsContext wraps a Quartz graphics context. All methods
are implemented at the C-level in macosx.GraphicsContext. These
methods set drawing properties such as the line style, fill color,
etc. The actual drawing is done by the Renderer, which draws into
the GraphicsContext.
"""
def __init__(self):
GraphicsContextBase.__init__(self)
_macosx.GraphicsContext.__init__(self)
def set_foreground(self, fg, isRGB=False):
if not isRGB:
fg = colorConverter.to_rgb(fg)
_macosx.GraphicsContext.set_foreground(self, fg)
def set_clip_rectangle(self, box):
GraphicsContextBase.set_clip_rectangle(self, box)
if not box: return
_macosx.GraphicsContext.set_clip_rectangle(self, box.bounds)
def set_clip_path(self, path):
GraphicsContextBase.set_clip_path(self, path)
if not path: return
path = path.get_fully_transformed_path()
_macosx.GraphicsContext.set_clip_path(self, path)
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For performance reasons, we don't want to redraw the figure after
each draw command. Instead, we mark the figure as invalid, so that
it will be redrawn as soon as the event loop resumes via PyOS_InputHook.
This function should be called after each draw event, even if
matplotlib is not running interactively.
"""
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.invalidate()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvasMac(figure)
manager = FigureManagerMac(canvas, num)
return manager
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
"""
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
self.renderer = RendererMac(figure.dpi, width, height)
_macosx.FigureCanvas.__init__(self, width, height)
def resize(self, width, height):
self.renderer.set_width_height(width, height)
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width, height)
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', **kwargs):
if dpi is None: dpi = matplotlib.rcParams['savefig.dpi']
filename = unicode(filename)
root, ext = os.path.splitext(filename)
ext = ext[1:].lower()
if not ext:
ext = "png"
filename = root + "." + ext
if ext=="jpg": ext = "jpeg"
# save the figure settings
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
# set the new parameters
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
if ext in ('jpeg', 'png', 'tiff', 'gif', 'bmp'):
width, height = self.figure.get_size_inches()
width, height = width*dpi, height*dpi
self.write_bitmap(filename, width, height)
elif ext == 'pdf':
self.write_pdf(filename)
elif ext in ('ps', 'eps'):
from backend_ps import FigureCanvasPS
# Postscript backend changes figure.dpi, but doesn't change it back
origDPI = self.figure.dpi
fc = self.switch_backends(FigureCanvasPS)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.dpi = origDPI
self.figure.set_canvas(self)
elif ext=='svg':
from backend_svg import FigureCanvasSVG
fc = self.switch_backends(FigureCanvasSVG)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.set_canvas(self)
else:
raise ValueError("Figure format not available (extension %s)" % ext)
# restore original figure settings
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbarMac(canvas)
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# This is ugly, but this is what tkagg and gtk are doing.
# It is needed to get ginput() working.
self.canvas.figure.show = lambda *args: self.show()
def show(self):
self.canvas.draw()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbarMac(_macosx.NavigationToolbar):
def __init__(self, canvas):
self.canvas = canvas
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
images = {}
for imagename in ("stock_left",
"stock_right",
"stock_up",
"stock_down",
"stock_zoom-in",
"stock_zoom-out",
"stock_save_as"):
filename = os.path.join(basedir, imagename+".ppm")
images[imagename] = self._read_ppm_image(filename)
_macosx.NavigationToolbar.__init__(self, images)
self.message = None
def _read_ppm_image(self, filename):
data = ""
imagefile = open(filename)
for line in imagefile:
if "#" in line:
i = line.index("#")
line = line[:i] + "\n"
data += line
imagefile.close()
magic, width, height, maxcolor, imagedata = data.split(None, 4)
width, height = int(width), int(height)
assert magic=="P6"
assert len(imagedata)==width*height*3 # 3 colors in RGB
return (width, height, imagedata)
def panx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.pan(direction)
self.canvas.invalidate()
def pany(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.pan(direction)
self.canvas.invalidate()
def zoomx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.zoom(direction)
self.canvas.invalidate()
def zoomy(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.zoom(direction)
self.canvas.invalidate()
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(x0, y0, x1, y1)
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerMac
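# --- Hedged usage sketch (assumption: running on OS X with the _macosx extension
# --- available; not part of matplotlib itself): the path pylab takes through this
# --- backend to create and save a figure.
def _example_save_figure(filename='figure.png'):
    manager = new_figure_manager(1, figsize=(4, 3))
    axes = manager.canvas.figure.add_subplot(111)
    axes.plot([0, 1, 2], [0, 1, 4])
    # print_figure dispatches on the file extension (png/pdf/ps/eps/svg/...)
    manager.canvas.print_figure(filename, dpi=100)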
| gpl-3.0 |
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/urbansim_parcel/indicators/make_buildout_indicators.py | 2 | 8339 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.configurations.dataset_pool_configuration import DatasetPoolConfiguration
from opus_core.indicator_framework.core.source_data import SourceData
from opus_core.indicator_framework.image_types.matplotlib_map import Map
#from opus_core.indicator_framework.image_types.matplotlib_chart import Chart
from opus_core.indicator_framework.image_types.table import Table
from opus_core.indicator_framework.image_types.geotiff_map import GeotiffMap
from opus_core.indicator_framework.image_types.dataset_table import DatasetTable
from opus_core.indicator_framework.image_types.matplotlib_lorenzcurve import LorenzCurve
from opus_core.indicator_framework.core.indicator_factory import IndicatorFactory
""" Prerequisite:
1. run buildingout_query.sql to create building_sqft_per_job_by_zone_generic_land_use_type_id table, or similar table for other geography
2. cache the table to the cache that needs to create buildout indicators for
3. get/create building_sqft_per_job_by_zone_generic_land_use_type_id_dataset.py (in urbansim_parcel/datasets)
4. active/planned/proposed projects are cached in development_project_proposals and development_project_proposal_components
5. add is_redevelopable attribute to parcel
"""
run_description = 'create indicators for buildout analysis'
cache_directory = r'M:\urbansim_cache\run_6573.2008_06_12_13_48'
source_data = SourceData(
cache_directory = cache_directory,
run_description = run_description,
years = [2001],
dataset_pool_configuration = DatasetPoolConfiguration(
package_order=['psrc_parcel','urbansim_parcel','psrc', 'urbansim','opus_core'],
),
)
attrs_by_glu = []
# this loop create indicators for each generic_land_type from 1 to 8
for glu_id in range(1, 9):
attrs_by_glu += [
## existing
"existing_job_spaces_glu%s = zone.aggregate(parcel.aggregate(building.non_residential_sqft)) / zone.disaggregate(building_sqft_per_job_by_zone_generic_land_use_type_id.building_sqft_per_job_glu%s)" % (glu_id, glu_id),
## existing units/job spaces excluding redevelopable parcels
"existing_job_spaces_unredev_glu%s=zone.aggregate(parcel.aggregate(building.non_residential_sqft) * numpy.logical_not(urbansim_parcel.parcel.is_redevelopable).astype(int32) ) / zone.disaggregate(building_sqft_per_job_by_zone_generic_land_use_type_id.building_sqft_per_job_glu%s)" % (glu_id, glu_id),
## redevelopment
"redev_buildout_units_glu%s=zone.aggregate(urbansim_parcel.parcel.max_units_per_acre_capacity_for_generic_land_use_type_%s * parcel.parcel_sqft * (urbansim_parcel.parcel.is_redevelopable).astype(int32) )" % (glu_id, glu_id),
"redev_buildout_job_spaces_glu%s=zone.aggregate(urbansim_parcel.parcel.max_far_capacity_for_generic_land_use_type_%s * parcel.parcel_sqft * (urbansim_parcel.parcel.is_redevelopable).astype(int32) ) / zone.disaggregate(building_sqft_per_job_by_zone_generic_land_use_type_id.building_sqft_per_job_glu%s)" % (glu_id, glu_id, glu_id),
## active(status_id==1)/planned(status_id==3)/proposed(status_id==2)
"proposed_job_spaces_glu%s = zone.aggregate(development_project_proposal.aggregate(urbansim_parcel.development_project_proposal_component.units_proposed * (numpy.logical_not(urbansim_parcel.development_project_proposal_component.is_residential)).astype(int32)) * (urbansim_parcel.development_project_proposal.status_id==2).astype(int32), intermediates=[parcel] ) / zone.disaggregate(building_sqft_per_job_by_zone_generic_land_use_type_id.building_sqft_per_job_glu%s)" % (glu_id, glu_id),
## vacant/agriculture buildout
"va_buildout_glu%s = zone.aggregate( urbansim_parcel.parcel.max_units_per_acre_capacity_for_generic_land_use_type_%s * ( parcel.parcel_sqft / 43560.0 ) * ( parcel.number_of_agents(development_project_proposal) == 0) * numpy.logical_or( parcel.disaggregate(land_use_type.land_use_name)=='vacant', parcel.disaggregate(land_use_type.land_use_name)=='agriculture') )" % (glu_id, glu_id),
"va_buildout_job_spaces_glu%s = zone.aggregate( urbansim_parcel.parcel.max_far_capacity_for_generic_land_use_type_%s * parcel.parcel_sqft * ( parcel.number_of_agents(development_project_proposal) == 0) * numpy.logical_or( parcel.disaggregate(land_use_type.land_use_name)=='vacant', parcel.disaggregate(land_use_type.land_use_name)=='agriculture') ) / zone.disaggregate(building_sqft_per_job_by_zone_generic_land_use_type_id.building_sqft_per_job_glu%s)" % (glu_id, glu_id, glu_id),
]
indicators=[
DatasetTable(
attributes = [
### indicators don't need to iterate by generic_land_use_type
## existing
"existing_units = zone.aggregate(urbansim_parcel.parcel.residential_units)",
#"existing_job_spaces_glu1 = zone.aggregate(parcel.aggregate(building.non_residential_sqft)) / zone.disaggregate(building_sqft_per_job_by_zone_generic_land_use_type_id.building_sqft_per_job_glu1)",
## existing units/job spaces excluding redevelopable parcels
"existing_units_unredev=zone.aggregate(urbansim_parcel.parcel.residential_units * numpy.logical_not(urbansim_parcel.parcel.is_redevelopable).astype(int32) )",
#"existing_job_spaces_unredev_glu1=zone.aggregate(parcel.aggregate(building.non_residential_sqft) * numpy.logical_not(urbansim_parcel.parcel.is_redevelopable).astype(int32) ) / zone.disaggregate(building_sqft_per_job_by_zone_generic_land_use_type_id.building_sqft_per_job_glu1)",
## redevelopment
#"redev_buildout_units_glu1=zone.aggregate(urbansim_parcel.parcel.max_units_per_acre_capacity_for_generic_land_use_type_1 * parcel.parcel_sqft * (urbansim_parcel.parcel.is_redevelopable).astype(int32) )",
#"redev_buildout_job_spaces_glu1=zone.aggregate(urbansim_parcel.parcel.max_far_capacity_for_generic_land_use_type_1 * parcel.parcel_sqft * (urbansim_parcel.parcel.is_redevelopable).astype(int32) ) / zone.disaggregate(building_sqft_per_job_by_zone_generic_land_use_type_id.building_sqft_per_job_glu1)",
## active(status_id==1)/planned(status_id==3)/proposed(status_id==2)
"proposed_units = zone.aggregate(development_project_proposal.aggregate(urbansim_parcel.development_project_proposal_component.units_proposed * (urbansim_parcel.development_project_proposal_component.is_residential).astype(int32)) * (urbansim_parcel.development_project_proposal.status_id==2).astype(int32), intermediates=[parcel] )",
#"proposed_job_spaces_glu1 = zone.aggregate(development_project_proposal.aggregate(urbansim_parcel.development_project_proposal_component.units_proposed * (numpy.logical_not(urbansim_parcel.development_project_proposal_component.is_residential)).astype(int32)) * (urbansim_parcel.development_project_proposal.status_id==2).astype(int32), intermediates=[parcel] ) / zone.disaggregate(building_sqft_per_job_by_zone_generic_land_use_type_id.building_sqft_per_job_glu1)",
## vacant/agriculture buildout
#"va_buildout_glu1 = zone.aggregate( urbansim_parcel.parcel.max_units_per_acre_capacity_for_generic_land_use_type_1 * parcel.parcel_sqft * ( parcel.number_of_agents(development_project_proposal) == 0) * numpy.logical_or( parcel.disaggregate(land_use_type.land_use_name)=='vacant', parcel.disaggregate(land_use_type.land_use_name)=='agriculture') )",
#"va_buildout_job_spaces_glu1 = zone.aggregate( urbansim_parcel.parcel.max_far_capacity_for_generic_land_use_type_1 * parcel.parcel_sqft * ( parcel.number_of_agents(development_project_proposal) == 0) * numpy.logical_or( parcel.disaggregate(land_use_type.land_use_name)=='vacant', parcel.disaggregate(land_use_type.land_use_name)=='agriculture') ) / zone.disaggregate(building_sqft_per_job_by_zone_generic_land_use_type_id.building_sqft_per_job_glu1)",
] + attrs_by_glu,
dataset_name = 'zone',
source_data = source_data,
name = 'zone_buildout',
),
]
IndicatorFactory().create_indicators(
indicators = indicators,
display_error_box = False,
show_results = False)
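# --- Hedged helper (not part of the original script): the names of the per-GLU
# --- attributes generated by the loop above, handy for a quick sanity check of
# --- the buildout indicator set.
def buildout_attribute_names():
    return [expr.split('=', 1)[0].strip() for expr in attrs_by_glu]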
| gpl-2.0 |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_traps/traps2/src/dataset.py | 55 | 78980 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import locale
import socket
import tarfile
import urllib2
import zipfile
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
from files import *
from general import *
from ui import *
class Dataset(object):
"""Dataset base class.
    Specific dataset classes inherit from this class and reimplement only the methods they need.
"""
def __init__(self, data_path='data', name='dataset'):
"""__init__ method.
Parameters
----------
data_path : str
Basepath where the dataset is stored.
(Default value='data')
"""
# Folder name for dataset
self.name = name
# Path to the dataset
self.local_path = os.path.join(data_path, self.name)
# Create the dataset path if does not exist
if not os.path.isdir(self.local_path):
os.makedirs(self.local_path)
# Evaluation setup folder
self.evaluation_setup_folder = 'evaluation_setup'
# Path to the folder containing evaluation setup files
self.evaluation_setup_path = os.path.join(self.local_path, self.evaluation_setup_folder)
# Meta data file, csv-format
self.meta_filename = 'meta.txt'
# Path to meta data file
self.meta_file = os.path.join(self.local_path, self.meta_filename)
# Hash file to detect removed or added files
self.filelisthash_filename = 'filelist.hash'
# Number of evaluation folds
self.evaluation_folds = 1
# List containing dataset package items
# Define this in the inherited class.
# Format:
# {
# 'remote_package': download_url,
# 'local_package': os.path.join(self.local_path, 'name_of_downloaded_package'),
# 'local_audio_path': os.path.join(self.local_path, 'name_of_folder_containing_audio_files'),
# }
self.package_list = []
# List of audio files
self.files = None
# List of meta data dict
self.meta_data = None
# Training meta data for folds
self.evaluation_data_train = {}
# Testing meta data for folds
self.evaluation_data_test = {}
# Recognized audio extensions
self.audio_extensions = {'wav', 'flac'}
# Info fields for dataset
self.authors = ''
self.name_remote = ''
self.url = ''
self.audio_source = ''
self.audio_type = ''
self.recording_device_model = ''
self.microphone_model = ''
@property
def audio_files(self):
"""Get all audio files in the dataset
Parameters
----------
Nothing
Returns
-------
filelist : list
File list with absolute paths
"""
if self.files is None:
self.files = []
for item in self.package_list:
path = item['local_audio_path']
if path:
l = os.listdir(path)
for f in l:
file_name, file_extension = os.path.splitext(f)
if file_extension[1:] in self.audio_extensions:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
@property
def audio_file_count(self):
"""Get number of audio files in dataset
Parameters
----------
Nothing
Returns
-------
filecount : int
Number of audio files
"""
return len(self.audio_files)
@property
def meta(self):
"""Get meta data for dataset. If not already read from disk, data is read and returned.
Parameters
----------
Nothing
Returns
-------
meta_data : list
List containing meta data as dict.
Raises
-------
IOError
meta file not found.
"""
if self.meta_data is None:
self.meta_data = []
meta_id = 0
if os.path.isfile(self.meta_file):
f = open(self.meta_file, 'rt')
try:
reader = csv.reader(f, delimiter='\t')
for row in reader:
if len(row) == 2:
# Scene meta
self.meta_data.append({'file': row[0], 'scene_label': row[1].rstrip()})
elif len(row) == 4:
# Audio tagging meta
self.meta_data.append(
{'file': row[0], 'scene_label': row[1].rstrip(), 'tag_string': row[2].rstrip(),
'tags': row[3].split(';')})
elif len(row) == 6:
# Event meta
self.meta_data.append({'file': row[0],
'scene_label': row[1].rstrip(),
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4].rstrip(),
'event_type': row[5].rstrip(),
'id': meta_id
})
meta_id += 1
finally:
f.close()
else:
raise IOError("Meta file not found [%s]" % self.meta_file)
return self.meta_data
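    # Hedged illustration (not in the original source) of the three tab-separated
    # meta.txt row layouts parsed above:
    #   <file>\t<scene_label>
    #   <file>\t<scene_label>\t<tag_string>\t<tag1;tag2;...>
    #   <file>\t<scene_label>\t<event_onset>\t<event_offset>\t<event_label>\t<event_type>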
@property
def meta_count(self):
"""Number of meta data items.
Parameters
----------
Nothing
Returns
-------
meta_item_count : int
Meta data item count
"""
return len(self.meta)
@property
def fold_count(self):
"""Number of fold in the evaluation setup.
Parameters
----------
Nothing
Returns
-------
fold_count : int
Number of folds
"""
return self.evaluation_folds
@property
def scene_labels(self):
"""List of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of scene labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'scene_label' in item and item['scene_label'] not in labels:
labels.append(item['scene_label'])
labels.sort()
return labels
@property
def scene_label_count(self):
"""Number of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
scene_label_count : int
Number of unique scene labels.
"""
return len(self.scene_labels)
@property
def event_labels(self):
"""List of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
@property
def event_label_count(self):
"""Number of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
event_label_count : int
Number of unique event labels
"""
return len(self.event_labels)
@property
def audio_tags(self):
"""List of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of audio tags in alphabetical order.
"""
tags = []
for item in self.meta:
if 'tags' in item:
for tag in item['tags']:
if tag and tag not in tags:
tags.append(tag)
tags.sort()
return tags
@property
def audio_tag_count(self):
"""Number of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
audio_tag_count : int
Number of unique audio tags
"""
return len(self.audio_tags)
def __getitem__(self, i):
"""Getting meta data item
Parameters
----------
i : int
item id
Returns
-------
meta_data : dict
Meta data item
"""
if i < len(self.meta):
return self.meta[i]
else:
return None
def __iter__(self):
"""Iterator for meta data items
Parameters
----------
Nothing
Returns
-------
Nothing
"""
i = 0
meta = self[i]
# yield window while it's valid
while meta is not None:
yield meta
# get next item
i += 1
meta = self[i]
@staticmethod
def print_bytes(num_bytes):
"""Output number of bytes according to locale and with IEC binary prefixes
Parameters
----------
num_bytes : int > 0 [scalar]
Bytes
Returns
-------
bytes : str
Human readable string
"""
KiB = 1024
MiB = KiB * KiB
GiB = KiB * MiB
TiB = KiB * GiB
PiB = KiB * TiB
EiB = KiB * PiB
ZiB = KiB * EiB
YiB = KiB * ZiB
locale.setlocale(locale.LC_ALL, '')
output = locale.format("%d", num_bytes, grouping=True) + ' bytes'
if num_bytes > YiB:
output += ' (%.4g YiB)' % (num_bytes / YiB)
elif num_bytes > ZiB:
output += ' (%.4g ZiB)' % (num_bytes / ZiB)
elif num_bytes > EiB:
output += ' (%.4g EiB)' % (num_bytes / EiB)
elif num_bytes > PiB:
output += ' (%.4g PiB)' % (num_bytes / PiB)
elif num_bytes > TiB:
output += ' (%.4g TiB)' % (num_bytes / TiB)
elif num_bytes > GiB:
output += ' (%.4g GiB)' % (num_bytes / GiB)
elif num_bytes > MiB:
output += ' (%.4g MiB)' % (num_bytes / MiB)
elif num_bytes > KiB:
output += ' (%.4g KiB)' % (num_bytes / KiB)
return output
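    # Hedged example (not in the original source):
    #   Dataset.print_bytes(5 * 1024 ** 2) -> "5,242,880 bytes (5 MiB)"
    # (the grouping separator depends on the active locale).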
def download(self):
"""Download dataset over the internet to the local path
Parameters
----------
Nothing
Returns
-------
Nothing
Raises
-------
IOError
Download failed.
"""
section_header('Download dataset')
for item in self.package_list:
try:
if item['remote_package'] and not os.path.isfile(item['local_package']):
data = None
req = urllib2.Request(item['remote_package'], data, {})
handle = urllib2.urlopen(req)
if "Content-Length" in handle.headers.items():
size = int(handle.info()["Content-Length"])
else:
size = None
actualSize = 0
blocksize = 64 * 1024
tmp_file = os.path.join(self.local_path, 'tmp_file')
fo = open(tmp_file, "wb")
terminate = False
while not terminate:
block = handle.read(blocksize)
actualSize += len(block)
if size:
progress(title_text=os.path.split(item['local_package'])[1],
percentage=actualSize / float(size),
note=self.print_bytes(actualSize))
else:
progress(title_text=os.path.split(item['local_package'])[1],
note=self.print_bytes(actualSize))
if len(block) == 0:
break
fo.write(block)
fo.close()
os.rename(tmp_file, item['local_package'])
            except (urllib2.URLError, socket.timeout), e:
                # Close the temporary file if possible, then always report the
                # failed download instead of silently continuing.
                try:
                    fo.close()
                except:
                    pass
                raise IOError('Download failed [%s]' % (item['remote_package']))
foot()
def extract(self):
"""Extract the dataset packages
Parameters
----------
Nothing
Returns
-------
Nothing
"""
section_header('Extract dataset')
for item_id, item in enumerate(self.package_list):
if item['local_package']:
if item['local_package'].endswith('.zip'):
with zipfile.ZipFile(item['local_package'], "r") as z:
# Trick to omit first level folder
parts = []
for name in z.namelist():
if not name.endswith('/'):
parts.append(name.split('/')[:-1])
prefix = os.path.commonprefix(parts) or ''
if prefix:
if len(prefix) > 1:
prefix_ = list()
prefix_.append(prefix[0])
prefix = prefix_
prefix = '/'.join(prefix) + '/'
offset = len(prefix)
# Start extraction
members = z.infolist()
file_count = 1
for i, member in enumerate(members):
if len(member.filename) > offset:
member.filename = member.filename[offset:]
if not os.path.isfile(os.path.join(self.local_path, member.filename)):
z.extract(member, self.local_path)
progress(
title_text='Extracting [' + str(item_id) + '/' + str(len(self.package_list)) + ']',
percentage=(file_count / float(len(members))),
note=member.filename)
file_count += 1
elif item['local_package'].endswith('.tar.gz'):
tar = tarfile.open(item['local_package'], "r:gz")
for i, tar_info in enumerate(tar):
if not os.path.isfile(os.path.join(self.local_path, tar_info.name)):
tar.extract(tar_info, self.local_path)
progress(title_text='Extracting [' + str(item_id) + '/' + str(len(self.package_list)) + ']',
note=tar_info.name)
tar.members = []
tar.close()
foot()
def on_after_extract(self):
"""Dataset meta data preparation, this will be overloaded in dataset specific classes
Parameters
----------
Nothing
Returns
-------
Nothing
"""
pass
def get_filelist(self):
"""List of files under local_path
Parameters
----------
Nothing
Returns
-------
filelist: list
File list
"""
filelist = []
for path, subdirs, files in os.walk(self.local_path):
for name in files:
filelist.append(os.path.join(path, name))
return filelist
def check_filelist(self):
"""Generates hash from file list and check does it matches with one saved in filelist.hash.
If some files have been deleted or added, checking will result False.
Parameters
----------
Nothing
Returns
-------
result: bool
Result
"""
if os.path.isfile(os.path.join(self.local_path, self.filelisthash_filename)):
hash = load_text(os.path.join(self.local_path, self.filelisthash_filename))[0]
if hash != get_parameter_hash(sorted(self.get_filelist())):
return False
else:
return True
else:
return False
def save_filelist_hash(self):
"""Generates file list hash, and saves it as filelist.hash under local_path.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
filelist = self.get_filelist()
filelist_hash_not_found = True
for file in filelist:
if self.filelisthash_filename in file:
filelist_hash_not_found = False
if filelist_hash_not_found:
filelist.append(os.path.join(self.local_path, self.filelisthash_filename))
save_text(os.path.join(self.local_path, self.filelisthash_filename), get_parameter_hash(sorted(filelist)))
def fetch(self):
"""Download, extract and prepare the dataset.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
if not self.check_filelist():
self.download()
self.extract()
self.on_after_extract()
self.save_filelist_hash()
return self
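    # Hedged usage sketch (the class and argument names below are illustrative):
    #   dataset = TUTAcousticScenes_2016_DevelopmentSet(data_path='data').fetch()
    #   for fold in dataset.folds(mode='folds'):
    #       train_items = dataset.train(fold)
    #       test_items = dataset.test(fold)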
def train(self, fold=0):
"""List of training items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 2:
# Scene meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1]
})
elif len(row) == 4:
# Audio tagging meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'tag_string': row[2],
'tags': row[3].split(';')
})
elif len(row) == 5:
# Event meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
else:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label']
})
self.evaluation_data_train[0] = data
return self.evaluation_data_train[fold]
def test(self, fold=0):
"""List of testing items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to testing set for given fold.
"""
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[fold] = data
return self.evaluation_data_test[fold]
def folds(self, mode='folds'):
"""List of fold ids
Parameters
----------
mode : str {'folds','full'}
            Fold setup type, possible values are 'folds' and 'full'. In 'full' mode the fold number is set to 0 and all data is used for training.
(Default value=folds)
Returns
-------
list : list of integers
Fold ids
"""
if mode == 'folds':
return range(1, self.evaluation_folds + 1)
elif mode == 'full':
return [0]
def file_meta(self, file):
"""Meta data for given file
Parameters
----------
file : str
File name
Returns
-------
list : list of dicts
List containing all meta data related to given file.
"""
file = self.absolute_to_relative(file)
file_meta = []
for item in self.meta:
if item['file'] == file:
file_meta.append(item)
return file_meta
def relative_to_absolute_path(self, path):
"""Converts relative path into absolute path.
Parameters
----------
path : str
Relative path
Returns
-------
path : str
Absolute path
"""
return os.path.abspath(os.path.join(self.local_path, path))
def absolute_to_relative(self, path):
"""Converts absolute path into relative path.
Parameters
----------
path : str
Absolute path
Returns
-------
path : str
Relative path
"""
if path.startswith(os.path.abspath(self.local_path)):
return os.path.relpath(path, self.local_path)
else:
return path
# =====================================================
# DCASE 2016
# =====================================================
class TUTAcousticScenes_2016_DevelopmentSet(Dataset):
"""TUT Acoustic scenes 2016 development dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, development dataset'
self.url = 'https://zenodo.org/record/45739'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.1.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.1.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.2.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.2.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.3.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.3.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.4.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.4.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.5.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.5.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.6.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.6.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.7.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.7.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.8.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.8.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
}
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
meta_data = {}
for fold in xrange(1, self.evaluation_folds):
# Read train files in
train_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')
f = open(train_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
# Read evaluation files in
eval_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt')
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
class TUTAcousticScenes_2016_EvaluationSet(Dataset):
"""TUT Acoustic scenes 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
eval_filename = os.path.join(self.evaluation_setup_path, 'evaluate.txt')
if not os.path.isfile(self.meta_file) and os.path.isfile(eval_filename):
section_header('Generating meta file for dataset')
meta_data = {}
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
def train(self, fold=0):
raise IOError('Train setup not available.')
# TUT Sound events 2016 development and evaluation sets
class TUTSoundEvents_2016_DevelopmentSet(Dataset):
"""TUT Sound events 2016 development dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, development dataset'
self.url = 'https://zenodo.org/record/45759'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.audio.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.audio.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'),
base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_train[fold]:
self.evaluation_data_train[fold][scene_label_] = []
if fold > 0:
with open(
os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_train.txt'),
'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 5:
# Event meta
self.evaluation_data_train[fold][scene_label_].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if item['scene_label'] == scene_label_:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
self.evaluation_data_train[0][scene_label_] = data
if scene_label:
return self.evaluation_data_train[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_train[fold][scene_label_]:
data.append(item)
return data
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
with open(
os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_test.txt'),
'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append(
{'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
                    if item['scene_label'] == scene_label_:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
class TUTSoundEvents_2016_EvaluationSet(Dataset):
"""TUT Sound events 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
]
@property
def scene_labels(self):
labels = ['home', 'residential_area']
labels.sort()
return labels
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'] not in labels:
labels.append(item['event_label'])
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file) and os.path.isdir(os.path.join(self.local_path, 'meta')):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'),
base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
raise IOError('Train setup not available.')
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
                with open(os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_test.txt'),
'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append(
{'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.audio_files:
if scene_label_ in item:
if self.relative_to_absolute_path(item) not in files:
data.append({'file': self.relative_to_absolute_path(item)})
files.append(self.relative_to_absolute_path(item))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
# CHIME home
class CHiMEHome_DomesticAudioTag_DevelopmentSet(Dataset):
def __init__(self, data_path=None):
Dataset.__init__(self, data_path=data_path, name='CHiMeHome-audiotag-development')
self.authors = 'Peter Foster, Siddharth Sigtia, Sacha Krstulovic, Jon Barker, and Mark Plumbley'
self.name_remote = 'The CHiME-Home dataset is a collection of annotated domestic environment audio recordings.'
self.url = ''
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Unknown'
self.evaluation_folds = 10
self.package_list = [
{
'remote_package': 'https://archive.org/download/chime-home/chime_home.tar.gz',
'local_package': os.path.join(self.local_path, 'chime_home.tar.gz'),
'local_audio_path': os.path.join(self.local_path, 'chime_home', 'chunks'),
},
]
@property
def audio_files(self):
"""Get all audio files in the dataset, use only file from CHime-Home-refined set.
Parameters
----------
nothing
Returns
-------
files : list
audio files
"""
if self.files is None:
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(row[1])
self.files = []
for file in self.package_list:
path = file['local_audio_path']
if path:
l = os.listdir(path)
p = path.replace(self.local_path + os.path.sep, '')
for f in l:
fileName, fileExtension = os.path.splitext(f)
if fileExtension[1:] in self.audio_extensions and fileName in refined_files:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
def read_chunk_meta(self, meta_filename):
if os.path.isfile(meta_filename):
meta_file_handle = open(meta_filename, 'rt')
try:
meta_file_reader = csv.reader(meta_file_handle, delimiter=',')
data = {}
for meta_file_row in meta_file_reader:
data[meta_file_row[0]] = meta_file_row[1]
finally:
meta_file_handle.close()
return data
def tagcode_to_taglabel(self, tag):
map = {'c': 'child speech',
'm': 'adult male speech',
'f': 'adult female speech',
'v': 'video game/tv',
'p': 'percussive sound',
'b': 'broadband noise',
'o': 'other',
'S': 'silence/background',
'U': 'unidentifiable'
}
if tag in map:
return map[tag]
else:
return None
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Legacy dataset meta files are converted to be compatible with current scheme.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
scene_label = 'home'
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(raw_path, base_filename + '.csv')
meta_data = self.read_chunk_meta(annotation_filename)
tags = []
for i, tag in enumerate(meta_data['majorityvote']):
                        if tag == 'b':
                            print(file)
                        if tag != 'S' and tag != 'U':
                            tags.append(self.tagcode_to_taglabel(tag))
tags = ';'.join(tags)
writer.writerow(
(os.path.join(relative_path, raw_filename), scene_label, meta_data['majorityvote'], tags))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
for target_tag in self.audio_tags:
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_test.txt')):
all_folds_found = False
if not all_folds_found:
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
numpy.random.seed(475686)
kf = KFold(n=len(self.audio_files), n_folds=self.evaluation_folds, shuffle=True)
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(
self.relative_to_absolute_path(os.path.join('chime_home', 'chunks', row[1] + '.wav')))
fold = 1
files = numpy.array(refined_files)
for train_index, test_index in kf:
train_files = files[train_index]
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow(
[os.path.join(relative_path, raw_filename), item['scene_label'], item['tag_string'],
';'.join(item['tags'])])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow(
[os.path.join(relative_path, raw_filename), item['scene_label'], item['tag_string'],
';'.join(item['tags'])])
fold += 1
# Legacy datasets
# =====================================================
# DCASE 2013
# =====================================================
class DCASE2013_Scene_DevelopmentSet(Dataset):
"""DCASE 2013 Acoustic scene classification, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Public Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/29/scenes_stereo.zip?sequence=1',
'local_package': os.path.join(self.local_path, 'scenes_stereo.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
class DCASE2013_Scene_EvaluationSet(DCASE2013_Scene_DevelopmentSet):
"""DCASE 2013 Acoustic scene classification, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Private Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_scene_classification_testset/scenes_stereo_testset.zip',
'local_package': os.path.join(self.local_path, 'scenes_stereo_testset.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo_testset'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file) or 1:
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
# Sound events
class DCASE2013_Event_DevelopmentSet(Dataset):
"""DCASE 2013 Sound event detection, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Public Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_development_OS/events_OS_development_v2.zip',
'local_package': os.path.join(self.local_path, 'events_OS_development_v2.zip'),
'local_audio_path': os.path.join(self.local_path, 'events_OS_development_v2'),
},
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_annotation.zip?sequence=9',
# 'local_package': os.path.join(self.local_path, 'singlesounds_annotation.zip'),
# 'local_audio_path': None,
# },
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_stereo.zip?sequence=7',
# 'local_package': os.path.join(self.local_path, 'singlesounds_stereo.zip'),
# 'local_audio_path': os.path.join(self.local_path, 'singlesounds_stereo'),
# },
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('singlesounds_stereo') != -1:
annotation_filename = os.path.join(self.local_path, 'Annotation1', base_filename + '_bdm.txt')
label = base_filename[:-2]
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1], label, 'i'))
finally:
annotation_file_handle.close()
elif file.find('events_OS_development_v2') != -1:
annotation_filename = os.path.join(self.local_path, 'events_OS_development_v2',
base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
# Construct training and testing sets. Isolated sound are used for training and
# polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
class DCASE2013_Event_EvaluationSet(Dataset):
"""DCASE 2013 Sound event detection, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Private Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_testset_OS/dcase2013_event_detection_testset_OS.zip',
'local_package': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS.zip'),
'local_audio_path': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('dcase2013_event_detection_testset_OS') != -1:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',
base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
else:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',
base_filename + '.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
# Construct training and testing sets. Isolated sound are used for training and
# polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
| mit |
bioinfo-core-BGU/neatseq-flow_modules | neatseq_flow_modules/Liron/cgMLST_and_MLST_typing_module/MLST_parser.py | 3 | 8400 | import os, re
import argparse
import pandas as pd
parser = argparse.ArgumentParser(description='Pars MLST')
parser.add_argument('-M', type=str,
help='MetaData file')
parser.add_argument('-F', type=str,
help='Merged MLST typing file')
parser.add_argument('-O' , type=str, default=os.getcwd(),
help='Output file directory')
parser.add_argument('-C', type=float, default=0.95,
help='Percentage of identified allele cutoff to consider sample [0.0 - 1.0]')
parser.add_argument('--S_MetaData', type=str, default="Samples",
help='samples ID field in the metadata file')
parser.add_argument('--S_Merged', type=str, default="Samples",
help='samples ID field in the Merged file')
parser.add_argument('--Non_allelic', nargs='+', type=str, default=["Samples",'Status','Percentage_of_missing_genes'],
help='Non allelic fields in the Merged file')
parser.add_argument('--Fields', nargs='+', type=str, default=['Status','Percentage_of_missing_genes'],
help='Fields to move to the metadata file')
parser.add_argument('--Cut', action='store_true', default=False,
help='Use only samples with metadata information')
parser.add_argument('--FASTA', action='store_true', default=False,
help='The input is a FASTA file')
parser.add_argument('--Polymorphic_sites_only', action='store_true', default=False,
help='Filter Non Polymorphic Sites from fasta input file')
parser.add_argument('--Tree', action='store_true', default=False,
help='Generate newick Tree using hierarchical-clustering [Hamming distance]')
parser.add_argument('--Tree_method', type=str, default='complete',
help='The hierarchical-clustering linkage method [default=complete]')
parser.add_argument('--ignore_unidentified_alleles', action='store_true', default=False,
help='Remove columns with unidentified alleles [default=False]')
args = parser.parse_args()
def isnumber(str):
if str==str:
try:
float(str)
return True
except ValueError:
return False
else:
return False
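# Allow --Fields and --Non_allelic values to be passed either space- or comma-separated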
Fields=[]
if args.Fields != None:
for field in args.Fields:
if len(field.split(","))>1:
for field_t in field.split(","):
Fields=Fields+[field_t]
else:
Fields=Fields+[field]
args.Fields=Fields
Fields=[]
if args.Non_allelic != None:
for field in args.Non_allelic:
if len(field.split(","))>1:
for field_t in field.split(","):
Fields=Fields+[field_t]
else:
Fields=Fields+[field]
args.Non_allelic=Fields
flag=0
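# Load the input either as a FASTA multiple sequence alignment (--FASTA) or as a tab-delimited merged MLST typing table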
if args.FASTA:
from Bio import AlignIO
msa=AlignIO.read(args.F,"fasta")
data=pd.DataFrame.from_records(msa)
data.index=list([msa[int(x)].id for x in list(data.index)])
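    # Drop alignment columns that contain gaps, spaces or ambiguous bases (N)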
data=data.drop(data.columns[data.apply(lambda x: len(re.findall("[- N]",x.sum().upper()))>0 ,axis=0)],axis=1)
if args.Polymorphic_sites_only:
data=data.drop(data.columns[data.apply(lambda x: len(list(set(x.sum().upper())))==1 ,axis=0)],axis=1)
temp_data=data
else:
temp_data = pd.read_csv(args.F, sep='\t',index_col=False, encoding="ISO-8859-1")
temp_data=temp_data.set_index(args.S_Merged,drop=False).copy()
temp_data=temp_data.applymap(lambda x: int(float(x)) if isnumber(x) else x).copy()
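    # Rename novel alleles ("New_Allele=...") to a unique locus_sample identifier so identical new alleles share the same name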
for j in temp_data.index:
for i in temp_data.columns:
if str(temp_data.loc[j,i]).startswith("New_Allele="):
temp_data.loc[temp_data.loc[:,i]==temp_data.loc[j,i],i]=i+"_"+str(j)
temp_data.index=list([str(x) for x in temp_data.index])
if (args.M != None)&(args.Cut):
MetaData = pd.read_csv(args.M , sep='\t',index_col=False, encoding="ISO-8859-1")
MetaData=MetaData.set_index(args.S_MetaData,drop=False).copy()
MetaData.index=list([str(x) for x in MetaData.index])
flag=1
temp_data=temp_data.loc[list([x in MetaData.index for x in temp_data.index]),].copy()
args.Non_allelic.extend([args.S_Merged])
args.Non_allelic.extend(args.Fields)
if None in args.Non_allelic:
args.Non_allelic.remove(None)
args.Non_allelic=set(args.Non_allelic)
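# cut_rows drops samples whose fraction of identified (non-'N') alleles is below the cutoff;
# cut_col keeps allelic columns only when every remaining sample has an identified allele,
# while non-allelic metadata columns are always kept.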
def cut_rows(temp_data,cutoff,Non_allelic_rows):
drop=[x for x in temp_data.columns if x not in Non_allelic_rows]
temp_data=temp_data[drop]
stay=list()
for row in temp_data.index:
        if (float(sum(temp_data.loc[row]!='N'))/ float(temp_data.shape[1]))>=cutoff:
            stay.append(row)
        else:
            print(("Sample %s has a lower percentage of identified alleles than the cutoff" % row))
            print(("%s" % (float(sum(temp_data.loc[row]!='N'))/ float(temp_data.shape[1]))))
return temp_data.ix[stay].copy()
def cut_col(temp_data,Non_allelic):
    stay=list()
    for col in temp_data.columns:
        if col not in Non_allelic:
            if sum(temp_data.loc[:,col]!='N')==temp_data.shape[0]:
                stay.append(col)
        else:
            stay.append(col)
    return temp_data[stay].copy()
def drop(data,fields,op=1):
if op==1:
for i in fields:
if i in data.columns:
data=data.drop(i,axis=1).copy()
else:
for i in fields:
if i in data:
data=data.drop(i).copy()
return data
if args.ignore_unidentified_alleles:
new_temp_data=cut_col(cut_rows(temp_data, args.C ,args.Non_allelic),args.Non_allelic)
else:
new_temp_data=cut_rows(temp_data, args.C ,args.Non_allelic)
if args.Tree:
from scipy.cluster.hierarchy import dendrogram, linkage, to_tree
from scipy.spatial.distance import pdist ,squareform
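    # Recursively convert the scipy hierarchical-clustering tree into a Newick string,
    # using parent height minus node height as the branch length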
def getNewick(node, newick, parentdist, leaf_names):
if node.is_leaf():
return "%s:%.2f%s" % (leaf_names[node.id], parentdist - node.dist, newick)
else:
if len(newick) > 0:
newick = "):%.2f%s" % (parentdist - node.dist, newick)
else:
newick = ");"
newick = getNewick(node.get_left(), newick, node.dist, leaf_names)
newick = getNewick(node.get_right(), ",%s" % (newick), node.dist, leaf_names)
newick = "(%s" % (newick)
return newick
Tree_data=drop(new_temp_data,args.Non_allelic,0).copy()
Tree_data=Tree_data.applymap(lambda x: str(x).upper()).copy()
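    # Pairwise Hamming distance: the number of positions at which two allele profiles differ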
x=pdist(Tree_data, lambda u, v: sum([1 for i in range(len(u)) if u[i] != v[i]]) )
Z = linkage(x,method=args.Tree_method,optimal_ordering=True)
tree = to_tree(Z,False)
h=open(os.path.join(args.O,'Tree.%s' % "newick"),'w')
h.write( getNewick(tree, "", tree.dist, Tree_data.index ))
h.close()
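# Collapse identical allele profiles (identical sequences in FASTA mode) into numbered profile types ("Index") for the phyloviz output files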
if args.FASTA:
new_temp_data['seq']=new_temp_data.apply(lambda x: x.sum().upper() ,axis=1)
g=new_temp_data.groupby('seq')
new_temp_data=new_temp_data.drop(new_temp_data.columns[new_temp_data.columns=='seq'],axis=1)
else:
m=new_temp_data.columns
m=drop(m,args.Non_allelic,0).copy()
g=new_temp_data.groupby(list(m[:]))
new_temp_data["Index"]=''
num=1
for i in g:
new_temp_data.loc[i[1].index,"Index"]=str(num)
num=num+1
if args.M != None:
if flag!=1:
MetaData = pd.read_csv(args.M , sep='\t',index_col=False, encoding="ISO-8859-1")
MetaData=MetaData.set_index(args.S_MetaData,drop=False).copy()
MetaData.index=list([str(x) for x in MetaData.index])
MetaData=MetaData.join(new_temp_data["Index"])
MetaData=MetaData.loc[~MetaData["Index"].isnull(),:]
if args.Fields != None:
for field in args.Fields:
if field in temp_data.columns:
MetaData=MetaData.join(temp_data[field],lsuffix='_Old')
MetaData=MetaData.set_index("Index").copy()
else:
MetaData=new_temp_data[["Index"]].copy()
if args.Fields != None:
for field in args.Fields:
if field in temp_data.columns:
MetaData=MetaData.join(temp_data[field],lsuffix='_Old')
MetaData.to_csv(os.path.join(args.O,'phyloviz_MetaData.tab'), sep='\t',index=True,float_format='%s')
new_temp_data=new_temp_data.set_index("Index").copy()
#new_temp_data=drop(new_temp_data,args.Non_allelic).copy()
new_temp_data.to_csv(os.path.join(args.O, 'phyloviz_Alleles.tab'), sep='\t',index=True,float_format='%s')
| gpl-3.0 |
zycdragonball/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 29 | 5666 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using convolutional networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_cnn_model(features, labels, mode):
"""Character level convolutional neural network model to predict classes."""
features_onehot = tf.one_hot(features[CHARS_FEATURE], 256)
input_layer = tf.reshape(
features_onehot, [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
input_layer,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
# Add a ReLU for non linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
x_train = x_train.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])
x_test = x_test.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])
# Build model
classifier = tf.estimator.Estimator(model_fn=char_cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
yufengg/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 16 | 46691 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
update_global_step = variables.get_global_step().assign_add(1)
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return (const, const,
control_flow_ops.group(train_op_1, training_op_2,
update_global_step))
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, update_global_step
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
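  # Counts step_begin/step_end calls during fit() and, in end(), asserts that
  # both counts equal the expected number of steps.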
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
def _model_fn_ops(
expected_features, expected_labels, actual_features, actual_labels, mode):
assert_ops = tuple([
check_ops.assert_equal(
expected_features[k], actual_features[k], name='assert_%s' % k)
for k in expected_features
] + [
check_ops.assert_equal(
expected_labels, actual_labels, name='assert_labels')
])
with ops.control_dependencies(assert_ops):
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1))
def _make_input_fn(features, labels):
def _input_fn():
return {
k: constant_op.constant(v)
for k, v in six.iteritems(features)
}, constant_op.constant(labels)
return _input_fn
class EstimatorModelFnTest(test.TestCase):
def testModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, mode, params, config):
model_fn_call_count[0] += 1
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
est = estimator.Estimator(
model_fn=_model_fn, params=expected_params, config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testPartialModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
expected_foo = 45.
expected_bar = 46.
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, foo, mode, params, config, bar):
model_fn_call_count[0] += 1
self.assertEqual(expected_foo, foo)
self.assertEqual(expected_bar, bar)
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
partial_model_fn = functools.partial(
_model_fn, foo=expected_foo, bar=expected_bar)
est = estimator.Estimator(
model_fn=partial_model_fn, params=expected_params,
config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return (constant_op.constant(0.), constant_op.constant(0.),
variables.get_global_step().assign_add(1))
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing train_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[None],
name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(
features, labels, {'examples': serialized_tf_example})
est.export_savedmodel(est.model_dir + '/export', serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
    # Test that nothing is saved, due to suppressing the default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError,
'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(model_fn=linear_model_fn,
model_dir='test_dir',
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def test_checkpoint_contains_relative_paths(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
model_dir=tmpdir,
model_fn=linear_model_fn_with_model_fn_ops)
est.fit(input_fn=boston_input_fn, steps=5)
checkpoint_file_content = file_io.read_file_to_string(
os.path.join(tmpdir, 'checkpoint'))
ckpt = checkpoint_state_pb2.CheckpointState()
text_format.Merge(checkpoint_file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
self.assertAllEqual(
['model.ckpt-1', 'model.ckpt-5'], ckpt.all_model_checkpoint_paths)
def test_train_save_copy_reload(self):
tmpdir = tempfile.mkdtemp()
model_dir1 = os.path.join(tmpdir, 'model_dir1')
est1 = estimator.Estimator(
model_dir=model_dir1,
model_fn=linear_model_fn_with_model_fn_ops)
est1.fit(input_fn=boston_input_fn, steps=5)
model_dir2 = os.path.join(tmpdir, 'model_dir2')
os.renames(model_dir1, model_dir2)
est2 = estimator.Estimator(
model_dir=model_dir2,
model_fn=linear_model_fn_with_model_fn_ops)
self.assertEqual(5, est2.get_variable_value('global_step'))
est2.fit(input_fn=boston_input_fn, steps=5)
self.assertEqual(10, est2.get_variable_value('global_step'))
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass an empty array and expect it to remain empty after calling
    # fit and evaluate. This requires the estimator to copy the array
    # internally if any hooks are added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
self.assertSameElements(
['bogus_lookup', 'feature'],
graph.get_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS))
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
stuarteberg/numpy | numpy/lib/recfunctions.py | 148 | 35012 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
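A minimal illustrative sketch (assuming ``numpy`` is imported as ``np``)::
    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.array([(1, 10.0)], dtype=[('A', int), ('B', float)])
    >>> rfn.get_names(a.dtype)
    ('A', 'B')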
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested
    structures are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
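    Examples
    --------
    A minimal sketch; the exact type codes (e.g. ``'<i8'`` vs ``'<i4'``)
    depend on the platform::
        a = np.zeros(3, dtype=[('A', int)])
        b = np.zeros(3, dtype=[('B', float)])
        zip_descr((a, b), flatten=True)
        # -> a list of (name, typestr) pairs such as
        #    [('A', '<i8'), ('B', '<f8')]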
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False},
        Whether to collapse nested fields into flat tuples of values.
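    Examples
    --------
    A minimal sketch; shorter inputs are padded with `fill_value`::
        list(izip_records([np.array([1, 2]), np.array([10., 20., 30.])],
                          fill_value=-1))
        # -> roughly [(1, 10.0), (2, 20.0), (-1, 30.0)]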
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
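    # Each input iterator is chained with one sentinel and then endless
    # fillers.  The shared `counter` list holds len(seqarrays) - 1 fill
    # values, so the pop() triggered once the longest input is exhausted
    # raises IndexError, which terminates the loop below.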
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
    * Without a mask, the missing value will be filled with something
      depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
            # Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
        # Same as before, but without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : string or sequence, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
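    Examples
    --------
    A minimal sketch; the exact repr (type codes, masked formatting) varies
    with platform and numpy version::
        from numpy.lib import recfunctions as rfn
        a = np.array([(1,), (2,), (3,)], dtype=[('a', int)])
        rfn.append_fields(a, 'b', [10., 20., 30.], usemask=False)
        # -> array([(1, 10.0), (2, 20.0), (3, 30.0)],
        #          dtype=[('a', '<i8'), ('b', '<f8')])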
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superpose arrays field by field.
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords is
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" %
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
    The key should be either a string or a sequence of strings corresponding
    to the fields used to join the arrays. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of
        r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
    r1postfix : string, optional
        String appended to the names of the fields of r1 that are present
        in r2 but absent from the key.
    r2postfix : string, optional
        String appended to the names of the fields of r2 that are present
        in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords is
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
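    Examples
    --------
    A minimal, illustrative sketch; the field names and values are made up
    and the exact repr of the joined array is not shown:
    >>> from numpy.lib import recfunctions as rfn
    >>> r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('a', float)])
    >>> r2 = np.array([(1, 100.), (3, 300.)], dtype=[('key', int), ('b', float)])
    >>> rfn.join_by('key', r1, r2, jointype='inner')  # doctest: +SKIP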
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names), set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
            # append the whole field name (extend would add its characters)
            names.append(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| bsd-3-clause |
michigraber/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
    # test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
aalmah/pylearn2 | pylearn2/train_extensions/live_monitoring.py | 30 | 11536 | """
Training extension for allowing querying of monitoring values while an
experiment executes.
"""
__authors__ = "Dustin Webb"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Dustin Webb"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import copy
try:
import zmq
zmq_available = True
except:
zmq_available = False
try:
import matplotlib.pyplot as plt
pyplot_available = True
except:
pyplot_available = False
from functools import wraps
from pylearn2.monitor import Monitor
from pylearn2.train_extensions import TrainExtension
class LiveMonitorMsg(object):
"""
Base class that defines the required interface for all Live Monitor
messages.
"""
response_set = False
def get_response(self):
"""
Method that instantiates a response message for a given request
message. It is not necessary to implement this function on response
messages.
"""
raise NotImplementedError('get_response is not implemented.')
class ChannelListResponse(LiveMonitorMsg):
"""
A message containing the list of channels being monitored.
"""
pass
class ChannelListRequest(LiveMonitorMsg):
"""
A message indicating a request for a list of channels being monitored.
"""
@wraps(LiveMonitorMsg.get_response)
def get_response(self):
return ChannelListResponse()
class ChannelsResponse(LiveMonitorMsg):
"""
A message containing monitoring data related to the channels specified.
Data can be requested for all epochs or select epochs.
Parameters
----------
channel_list : list
A list of the channels for which data has been requested.
start : int
The starting epoch for which data should be returned.
end : int
The epoch after which data should be returned.
step : int
The number of epochs to be skipped between data points.
"""
def __init__(self, channel_list, start, end, step=1):
assert(
isinstance(channel_list, list)
and len(channel_list) > 0
)
self.channel_list = channel_list
assert(start >= 0)
self.start = start
self.end = end
assert(step > 0)
self.step = step
class ChannelsRequest(LiveMonitorMsg):
"""
A message for requesting data related to the channels specified.
Parameters
----------
channel_list : list
A list of the channels for which data has been requested.
start : int
The starting epoch for which data should be returned.
end : int
The epoch after which data should be returned.
step : int
The number of epochs to be skipped between data points.
"""
def __init__(self, channel_list, start=0, end=-1, step=1):
assert(
isinstance(channel_list, list)
and len(channel_list) > 0
)
self.channel_list = channel_list
assert(start >= 0)
self.start = start
self.end = end
assert(step > 0)
self.step = step
@wraps(LiveMonitorMsg.get_response)
def get_response(self):
return ChannelsResponse(
self.channel_list,
self.start,
self.end,
self.step
)
class LiveMonitoring(TrainExtension):
"""
A training extension for remotely monitoring and filtering the channels
being monitored in real time. PyZMQ must be installed for this extension
to work.
Parameters
----------
address : string
The IP addresses of the interfaces on which the monitor should listen.
req_port : int
The port number to be used to service request.
pub_port : int
The port number to be used to publish updates.
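    Examples
    --------
    A rough usage sketch, not executed here: the surrounding ``Train``
    object, dataset, model and algorithm are assumed to be configured
    elsewhere in the usual pylearn2 way::
        from pylearn2.train_extensions.live_monitoring import LiveMonitoring
        # Passed to the Train object via its ``extensions`` list; the
        # extension then answers channel requests after each call to
        # on_monitor.
        extensions = [LiveMonitoring(address='*', req_port=5555, pub_port=5556)]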
"""
def __init__(self, address='*', req_port=5555, pub_port=5556):
if not zmq_available:
raise ImportError('zeromq needs to be installed to '
'use this module.')
self.address = 'tcp://%s' % address
assert(req_port != pub_port)
assert(req_port > 1024 and req_port < 65536)
self.req_port = req_port
assert(pub_port > 1024 and pub_port < 65536)
self.pub_port = pub_port
address_template = self.address + ':%d'
self.context = zmq.Context()
self.req_sock = None
if self.req_port > 0:
self.req_sock = self.context.socket(zmq.REP)
self.req_sock.bind(address_template % self.req_port)
self.pub_sock = None
if self.pub_port > 0:
self.pub_sock = self.context.socket(zmq.PUB)
            self.pub_sock.bind(address_template % self.pub_port)
# Tracks the number of times on_monitor has been called
self.counter = 0
@wraps(TrainExtension.on_monitor)
def on_monitor(self, model, dataset, algorithm):
monitor = Monitor.get_monitor(model)
try:
rsqt_msg = self.req_sock.recv_pyobj(flags=zmq.NOBLOCK)
# Determine what type of message was received
rsp_msg = rsqt_msg.get_response()
if isinstance(rsp_msg, ChannelListResponse):
rsp_msg.data = list(monitor.channels.keys())
if isinstance(rsp_msg, ChannelsResponse):
channel_list = rsp_msg.channel_list
                # Default to an empty result dict; it is replaced below by an
                # error object if the request does not carry a valid channel
                # list, which the client then raises on receipt.
                result = {}
                if (
                    not isinstance(channel_list, list)
                    or len(channel_list) == 0
                ):
                    channel_list = []
                    result = TypeError(
                        'ChannelsResponse requires a list of channels.'
                    )
for channel_name in channel_list:
if channel_name in monitor.channels.keys():
chan = copy.deepcopy(
monitor.channels[channel_name]
)
end = rsp_msg.end
if end == -1:
end = len(chan.batch_record)
# TODO copying and truncating the records individually
# like this is brittle. Is there a more robust
# solution?
chan.batch_record = chan.batch_record[
rsp_msg.start:end:rsp_msg.step
]
chan.epoch_record = chan.epoch_record[
rsp_msg.start:end:rsp_msg.step
]
chan.example_record = chan.example_record[
rsp_msg.start:end:rsp_msg.step
]
chan.time_record = chan.time_record[
rsp_msg.start:end:rsp_msg.step
]
chan.val_record = chan.val_record[
rsp_msg.start:end:rsp_msg.step
]
result[channel_name] = chan
else:
result[channel_name] = KeyError(
                            'Invalid channel: %s' % channel_name
)
rsp_msg.data = result
self.req_sock.send_pyobj(rsp_msg)
except zmq.Again:
pass
self.counter += 1
class LiveMonitor(object):
"""
    A utility class for requesting data from a LiveMonitoring training
extension.
Parameters
----------
address : string
The IP address on which a LiveMonitoring process is listening.
req_port : int
The port number on which a LiveMonitoring process is listening.
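    Examples
    --------
    A rough interactive sketch, not executed here: it requires a running
    LiveMonitoring server, and the channel name ``train_objective`` is only
    a placeholder::
        monitor = LiveMonitor(address='127.0.0.1', req_port=5555)
        print(monitor.list_channels().data)   # names of monitored channels
        monitor.update_channels(['train_objective'])
        # monitor.follow_channels(['train_objective'])  # live plot (blocks)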
"""
def __init__(self, address='127.0.0.1', req_port=5555):
"""
"""
if not zmq_available:
raise ImportError('zeromq needs to be installed to '
'use this module.')
self.address = 'tcp://%s' % address
assert(req_port > 0)
self.req_port = req_port
self.context = zmq.Context()
self.req_sock = self.context.socket(zmq.REQ)
self.req_sock.connect(self.address + ':' + str(self.req_port))
self.channels = {}
def list_channels(self):
"""
Returns a list of the channels being monitored.
"""
self.req_sock.send_pyobj(ChannelListRequest())
return self.req_sock.recv_pyobj()
def update_channels(self, channel_list, start=-1, end=-1, step=1):
"""
        Retrieves data for a specified set of channels and combines that data
        with any previously retrieved data.
        This assumes all the channels have the same number of values. It is
        unclear as to whether this is a reasonable assumption. If they do not
        have the same number of values then it may request too much or too
        little data, leading to duplicated data or holes in the data
        respectively. This could be made more robust by making a call to
        retrieve all the data for all of the channels.
        Parameters
        ----------
        channel_list : list
            A list of the channels for which data should be requested.
        start : int
            The starting epoch for which data should be requested.
        end : int
            The ending epoch for which data should be requested; -1 requests
            data up to the most recent epoch.
        step : int
            The number of epochs to be skipped between data points.
"""
assert((start == -1 and end == -1) or end > start)
if start == -1:
start = 0
if len(self.channels.keys()) > 0:
channel_name = list(self.channels.keys())[0]
start = len(self.channels[channel_name].epoch_record)
self.req_sock.send_pyobj(ChannelsRequest(
channel_list, start=start, end=end, step=step
))
rsp_msg = self.req_sock.recv_pyobj()
if isinstance(rsp_msg.data, Exception):
raise rsp_msg.data
for channel in rsp_msg.data.keys():
rsp_chan = rsp_msg.data[channel]
if isinstance(rsp_chan, Exception):
raise rsp_chan
if channel not in self.channels.keys():
self.channels[channel] = rsp_chan
else:
chan = self.channels[channel]
chan.batch_record += rsp_chan.batch_record
chan.epoch_record += rsp_chan.epoch_record
chan.example_record += rsp_chan.example_record
chan.time_record += rsp_chan.time_record
chan.val_record += rsp_chan.val_record
def follow_channels(self, channel_list):
"""
Tracks and plots a specified set of channels in real time.
Parameters
----------
channel_list : list
A list of the channels for which data has been requested.
"""
if not pyplot_available:
raise ImportError('pyplot needs to be installed for '
'this functionality.')
plt.clf()
plt.ion()
while True:
self.update_channels(channel_list)
plt.clf()
for channel_name in self.channels:
plt.plot(
self.channels[channel_name].epoch_record,
self.channels[channel_name].val_record,
label=channel_name
)
plt.legend()
plt.ion()
plt.draw()
| bsd-3-clause |
Tong-Chen/scikit-learn | sklearn/cross_validation.py | 2 | 50225 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import check_arrays, check_random_state, safe_mask
from .utils.fixes import unique
from .externals.joblib import Parallel, delayed
from .externals.six import string_types, with_metaclass
from .metrics.scorer import _deprecate_loss_and_score_funcs
__all__ = ['Bootstrap',
'KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'check_cv',
'cross_val_score',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n, indices=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
self._indices = indices
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
indices = self._indices
if indices:
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
if indices:
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p, indices=None):
super(LeavePOut, self).__init__(n, indices)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, indices):
super(_BaseKFold, self).__init__(n, indices)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds=%d.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf)
sklearn.cross_validation.KFold(n=4, n_folds=2)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, indices=None, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, indices)
random_state = check_random_state(random_state)
self.idxs = np.arange(n)
if shuffle:
random_state.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold, which
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf)
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one contains
    the remainder.
"""
def __init__(self, y, n_folds=3, indices=None):
super(StratifiedKFold, self).__init__(len(y), n_folds, indices)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = unique(y, return_inverse=True)
label_counts = np.bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [KFold(max(c, self.n_folds), self.n_folds)
for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels, indices=None):
super(LeaveOneLabelOut, self).__init__(len(labels), indices)
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
        Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p, indices=None):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels), indices)
self.labels = np.array(labels, copy=True)
self.unique_labels = unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class Bootstrap(object):
"""Random sampling with replacement cross-validation iterator
Provides train/test indices to split data in train test sets
while resampling the input n_iter times: each time a new
random split of the data is performed and then samples are drawn
(with replacement) on each side of the split to build the training
and test sets.
Note: contrary to other cross-validation strategies, bootstrapping
will allow some samples to occur several times in each splits. However
a sample that occurs in the train split will never occur in the test
split and vice-versa.
If you want each sample to occur at most once you should probably
use ShuffleSplit cross validation instead.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default is 3)
Number of bootstrapping iterations
train_size : int or float (default is 0.5)
If int, number of samples to include in the training split
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split.
test_size : int or float or None (default is None)
        If int, number of samples to include in the test split
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split.
If None, n_test is set as the complement of n_train.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> bs = cross_validation.Bootstrap(9, random_state=0)
>>> len(bs)
3
>>> print(bs)
Bootstrap(9, n_iter=3, train_size=5, test_size=4, random_state=0)
>>> for train_index, test_index in bs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [1 8 7 7 8] TEST: [0 3 0 5]
TRAIN: [5 4 2 4 2] TEST: [6 7 1 0]
TRAIN: [4 7 0 1 1] TEST: [5 3 6 5]
See also
--------
ShuffleSplit: cross validation using random permutations.
"""
# Static marker to be able to introspect the CV type
indices = True
def __init__(self, n, n_iter=3, train_size=.5, test_size=None,
random_state=None, n_bootstraps=None):
self.n = n
if n_bootstraps is not None: # pragma: no cover
warnings.warn("n_bootstraps was renamed to n_iter and will "
"be removed in 0.16.", DeprecationWarning)
n_iter = n_bootstraps
self.n_iter = n_iter
if (isinstance(train_size, numbers.Real) and train_size >= 0.0
and train_size <= 1.0):
self.train_size = int(ceil(train_size * n))
elif isinstance(train_size, numbers.Integral):
self.train_size = train_size
else:
raise ValueError("Invalid value for train_size: %r" %
train_size)
if self.train_size > n:
raise ValueError("train_size=%d should not be larger than n=%d" %
(self.train_size, n))
if isinstance(test_size, numbers.Real) and 0.0 <= test_size <= 1.0:
self.test_size = int(ceil(test_size * n))
elif isinstance(test_size, numbers.Integral):
self.test_size = test_size
elif test_size is None:
self.test_size = self.n - self.train_size
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if self.test_size > n:
raise ValueError("test_size=%d should not be larger than n=%d" %
(self.test_size, n))
self.random_state = random_state
def __iter__(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_train = permutation[:self.train_size]
ind_test = permutation[self.train_size:self.train_size
+ self.test_size]
# bootstrap in each split individually
train = rng.randint(0, self.train_size,
size=(self.train_size,))
test = rng.randint(0, self.test_size,
size=(self.test_size,))
yield ind_train[train], ind_test[test]
def __repr__(self):
return ('%s(%d, n_iter=%d, train_size=%d, test_size=%d, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
self.train_size,
self.test_size,
self.random_state,
))
def __len__(self):
return self.n_iter
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning)
self.n = n
self.n_iter = n_iter
if n_iterations is not None: # pragma: no cover
warnings.warn("n_iterations was renamed to n_iter for consistency "
" and will be removed in 0.16.")
self.n_iter = n_iterations
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self._indices = indices
self.n_train, self.n_test = _validate_shuffle_split(n,
test_size,
train_size)
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
if self._indices:
for train, test in self._iter_indices():
yield train, test
return
for train, test in self._iter_indices():
train_m = np.zeros(self.n, dtype=bool)
test_m = np.zeros(self.n, dtype=bool)
train_m[train] = True
test_m[test] = True
yield train_m, test_m
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
See also
--------
Bootstrap: cross-validation using re-sampling with replacement.
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, indices, random_state,
n_iterations)
self.y = np.array(y)
self.classes, self.y_indices = unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(np.bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = np.bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(n_i[i] + t_i[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
##############################################################################
def _cross_val_score(estimator, X, y, scorer, train, test, verbose,
fit_params):
"""Inner loop for cross validation"""
n_samples = X.shape[0] if sp.issparse(X) else len(X)
fit_params = dict([(k, np.asarray(v)[train]
if hasattr(v, '__len__') and len(v) == n_samples else v)
for k, v in fit_params.items()])
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_train = [X[idx] for idx in train]
X_test = [X[idx] for idx in test]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
X_train = X[np.ix_(train, train)]
X_test = X[np.ix_(test, train)]
else:
X_train = X[safe_mask(X, train)]
X_test = X[safe_mask(X, test)]
if y is None:
y_train = None
y_test = None
else:
y_train = y[train]
y_test = y[test]
estimator.fit(X_train, y_train, **fit_params)
if scorer is None:
score = estimator.score(X_test, y_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s)"
" instead." % (str(score), type(score)))
if verbose > 1:
print("score: %f" % score)
return score
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, score_func=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional, default: None
A cross-validation generator. If None, a 3-fold cross
validation is used or 3-fold stratified cross-validation
when y is supplied and estimator is a classifier.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = check_arrays(X, y, sparse_format='csr', allow_lists=True)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = _deprecate_loss_and_score_funcs(
loss_func=None,
score_func=score_func,
scoring=scoring
)
if scorer is None and not hasattr(estimator, 'score'):
raise TypeError(
"If no scoring is specified, the estimator passed "
"should have a 'score' method. The estimator %s "
"does not." % estimator)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
fit_params = fit_params if fit_params is not None else {}
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(
delayed(_cross_val_score)(clone(estimator), X, y, scorer, train, test,
verbose, fit_params)
for train, test in cv)
return np.array(scores)
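# A minimal usage sketch for cross_val_score; the iris data and
# LogisticRegression are arbitrary example choices, not part of this module.
# With an integer ``cv`` and a classifier, a stratified 5-fold split is built
# internally by _check_cv, and the estimator's default ``score`` (accuracy for
# classifiers) is used because no ``scoring`` is passed.
def _example_cross_val_score():  # illustration only
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    iris = load_iris()
    scores = cross_val_score(LogisticRegression(), iris.data, iris.target,
                             cv=5)
    # one score per fold
    return scores.mean(), scores.std()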
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
None, in which case 3 fold is used, or another object, that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
return _check_cv(cv, X=X, y=y, classifier=classifier, warn_mask=True)
def _check_cv(cv, X=None, y=None, classifier=False, warn_mask=False):
# This exists for internal use while indices is being deprecated.
is_sparse = sp.issparse(X)
needs_indices = is_sparse or not hasattr(X, "shape")
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if warn_mask and not needs_indices:
warnings.warn('check_cv will return indices instead of boolean '
'masks from 0.17', DeprecationWarning)
else:
needs_indices = None
if classifier:
cv = StratifiedKFold(y, cv, indices=needs_indices)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv, indices=needs_indices)
if needs_indices and not getattr(cv, "_indices", True):
raise ValueError("Sparse data and lists require indices-based cross"
" validation generator, got: %r", cv)
return cv
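# A small sketch of how an integer ``cv`` is resolved (the toy data below is
# only for illustration): classifiers get a stratified K-fold over ``y``,
# everything else a plain K-fold over the samples.
def _example_check_cv():  # illustration only
    X_demo = np.arange(20).reshape(10, 2)
    y_demo = np.array([0, 1] * 5)
    cv_clf = _check_cv(3, X_demo, y_demo, classifier=True)   # StratifiedKFold
    cv_reg = _check_cv(3, X_demo, y_demo, classifier=False)  # KFold
    return cv_clf, cv_reg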
def permutation_test_score(estimator, X, y, score_func=None, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape = [n_permutations]
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `score_func` returns bigger
numbers for better scores (e.g., accuracy_score). If `score_func` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = check_arrays(X, y, sparse_format='csr')
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = _deprecate_loss_and_score_funcs(
loss_func=None,
score_func=score_func,
scoring=scoring
)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
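# A minimal usage sketch for permutation_test_score; the iris data and the
# linear SVC are arbitrary example choices, and ``n_permutations`` is kept
# small only to make the illustration cheap.  A small p-value suggests the
# cross-validated score is unlikely to arise with permuted labels.
def _example_permutation_test_score():  # illustration only
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    score, permutation_scores, pvalue = permutation_test_score(
        SVC(kernel='linear'), iris.data, iris.target, scoring='accuracy',
        cv=5, n_permutations=30, random_state=0)
    return score, pvalue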
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
    Quick utility that wraps ``check_arrays`` and
    ``next(iter(ShuffleSplit(n_samples)))`` into a single call for
    splitting (and optionally subsampling) input data in a one-liner.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
dtype : a numpy dtype instance, None by default
Enforce a specific dtype.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> a, b = np.arange(10).reshape((5, 2)), range(5)
>>> a
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(b)
[0, 1, 2, 3, 4]
>>> a_train, a_test, b_train, b_test = train_test_split(
... a, b, test_size=0.33, random_state=42)
...
>>> a_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> b_train
array([2, 0, 3])
>>> a_test
array([[2, 3],
[8, 9]])
>>> b_test
array([1, 4])
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
options['sparse_format'] = 'csr'
if test_size is None and train_size is None:
test_size = 0.25
arrays = check_arrays(*arrays, **options)
n_samples = arrays[0].shape[0]
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((a[train], a[test]) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
Myasuka/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
    # Check that PCA and RandomizedPCA report consistent explained variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
    # Test that RandomizedPCA is invertible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
    # Test that probabilistic PCA correctly separates different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
pllim/ginga | ginga/mplw/ImageViewCanvasMpl.py | 3 | 1462 | #
# ImageViewCanvasMpl.py -- A FITS image widget with canvas drawing in Matplotlib
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.mplw import ImageViewMpl
from ginga.canvas.mixins import DrawingMixin, CanvasMixin, CompoundMixin
from ginga.util.toolbox import ModeIndicator
class ImageViewCanvasError(ImageViewMpl.ImageViewMplError):
pass
class ImageViewCanvas(ImageViewMpl.ImageViewZoom,
DrawingMixin, CanvasMixin, CompoundMixin):
def __init__(self, logger=None, rgbmap=None, settings=None,
bindmap=None, bindings=None):
ImageViewMpl.ImageViewZoom.__init__(self, logger=logger,
rgbmap=rgbmap,
settings=settings,
bindmap=bindmap,
bindings=bindings)
CompoundMixin.__init__(self)
CanvasMixin.__init__(self)
DrawingMixin.__init__(self)
# we are both a viewer and a canvas
self.set_canvas(self, private_canvas=self)
self._mi = ModeIndicator(self)
def redraw_data(self, whence=0):
super(ImageViewCanvas, self).redraw_data(whence=whence)
# refresh the matplotlib canvas
if self.figure is not None and self.figure.canvas is not None:
self.figure.canvas.draw()
#END
| bsd-3-clause |
pratapvardhan/pandas | pandas/tests/scalar/timedelta/test_construction.py | 6 | 8805 | # -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import Timedelta
def test_construction():
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
assert Timedelta(10, unit='d').value == expected
assert Timedelta(10.0, unit='d').value == expected
assert Timedelta('10 days').value == expected
assert Timedelta(days=10).value == expected
assert Timedelta(days=10.0).value == expected
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
assert Timedelta('10 days 00:00:10').value == expected
assert Timedelta(days=10, seconds=10).value == expected
assert Timedelta(days=10, milliseconds=10 * 1000).value == expected
assert Timedelta(days=10,
microseconds=10 * 1000 * 1000).value == expected
# rounding cases
assert Timedelta(82739999850000).value == 82739999850000
assert ('0 days 22:58:59.999850' in str(Timedelta(82739999850000)))
assert Timedelta(123072001000000).value == 123072001000000
assert ('1 days 10:11:12.001' in str(Timedelta(123072001000000)))
# string conversion with/without leading zero
# GH#9570
assert Timedelta('0:00:00') == timedelta(hours=0)
assert Timedelta('00:00:00') == timedelta(hours=0)
assert Timedelta('-1:00:00') == -timedelta(hours=1)
assert Timedelta('-01:00:00') == -timedelta(hours=1)
# more strings & abbrevs
# GH#8190
assert Timedelta('1 h') == timedelta(hours=1)
assert Timedelta('1 hour') == timedelta(hours=1)
assert Timedelta('1 hr') == timedelta(hours=1)
assert Timedelta('1 hours') == timedelta(hours=1)
assert Timedelta('-1 hours') == -timedelta(hours=1)
assert Timedelta('1 m') == timedelta(minutes=1)
assert Timedelta('1.5 m') == timedelta(seconds=90)
assert Timedelta('1 minute') == timedelta(minutes=1)
assert Timedelta('1 minutes') == timedelta(minutes=1)
assert Timedelta('1 s') == timedelta(seconds=1)
assert Timedelta('1 second') == timedelta(seconds=1)
assert Timedelta('1 seconds') == timedelta(seconds=1)
assert Timedelta('1 ms') == timedelta(milliseconds=1)
assert Timedelta('1 milli') == timedelta(milliseconds=1)
assert Timedelta('1 millisecond') == timedelta(milliseconds=1)
assert Timedelta('1 us') == timedelta(microseconds=1)
assert Timedelta('1 micros') == timedelta(microseconds=1)
assert Timedelta('1 microsecond') == timedelta(microseconds=1)
assert Timedelta('1.5 microsecond') == Timedelta('00:00:00.000001500')
assert Timedelta('1 ns') == Timedelta('00:00:00.000000001')
assert Timedelta('1 nano') == Timedelta('00:00:00.000000001')
assert Timedelta('1 nanosecond') == Timedelta('00:00:00.000000001')
# combos
assert Timedelta('10 days 1 hour') == timedelta(days=10, hours=1)
assert Timedelta('10 days 1 h') == timedelta(days=10, hours=1)
assert Timedelta('10 days 1 h 1m 1s') == timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s 3us') == -timedelta(
days=10, hours=1, minutes=1, seconds=1, microseconds=3)
assert Timedelta('-10 days 1 h 1.5m 1s 3us') == -timedelta(
days=10, hours=1, minutes=1, seconds=31, microseconds=3)
# Currently invalid as it has a - on the hh:mm:dd part
# (only allowed on the days)
with pytest.raises(ValueError):
Timedelta('-10 days -1 h 1.5m 1s 3us')
# only leading neg signs are allowed
with pytest.raises(ValueError):
Timedelta('10 days -1 h 1.5m 1s 3us')
# no units specified
with pytest.raises(ValueError):
Timedelta('3.1415')
# invalid construction
tm.assert_raises_regex(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assert_raises_regex(ValueError,
"unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assert_raises_regex(ValueError,
"cannot construct a Timedelta from the "
"passed arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
assert Timedelta(10.5, unit='s').value == expected
# offset
assert pd.to_timedelta(pd.offsets.Hour(2)) == Timedelta(hours=2)
assert Timedelta(pd.offsets.Hour(2)) == Timedelta(hours=2)
assert Timedelta(pd.offsets.Second(2)) == Timedelta(seconds=2)
# GH#11995: unicode
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
assert result == expected
assert (pd.to_timedelta(pd.offsets.Hour(2)) ==
Timedelta(u'0 days, 02:00:00'))
with pytest.raises(ValueError):
Timedelta(u'foo bar')
@pytest.mark.parametrize('item', list({'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}.items()))
@pytest.mark.parametrize('npdtype', [np.int64, np.int32, np.int16,
np.float64, np.float32, np.float16])
def test_td_construction_with_np_dtypes(npdtype, item):
# GH#8757: test construction with np dtypes
pykwarg, npkwarg = item
expected = np.timedelta64(1, npkwarg).astype('m8[ns]').view('i8')
assert Timedelta(**{pykwarg: npdtype(1)}).value == expected
@pytest.mark.parametrize('val', [
'1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999'])
def test_td_from_repr_roundtrip(val):
# round-trip both for string and value
td = Timedelta(val)
assert Timedelta(td.value) == td
# str does not normally display nanos
if not td.nanoseconds:
assert Timedelta(str(td)) == td
assert Timedelta(td._repr_base(format='all')) == td
def test_overflow_on_construction():
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
with pytest.raises(OverflowError):
pd.Timedelta(value)
# xref GH#17637
with pytest.raises(OverflowError):
pd.Timedelta(7 * 19999, unit='D')
with pytest.raises(OverflowError):
pd.Timedelta(timedelta(days=13 * 19999))
@pytest.mark.parametrize('fmt,exp', [
('P6DT0H50M3.010010012S', Timedelta(days=6, minutes=50, seconds=3,
milliseconds=10, microseconds=10,
nanoseconds=12)),
('P-6DT0H50M3.010010012S', Timedelta(days=-6, minutes=50, seconds=3,
milliseconds=10, microseconds=10,
nanoseconds=12)),
('P4DT12H30M5S', Timedelta(days=4, hours=12, minutes=30, seconds=5)),
('P0DT0H0M0.000000123S', Timedelta(nanoseconds=123)),
('P0DT0H0M0.00001S', Timedelta(microseconds=10)),
('P0DT0H0M0.001S', Timedelta(milliseconds=1)),
('P0DT0H1M0S', Timedelta(minutes=1)),
('P1DT25H61M61S', Timedelta(days=1, hours=25, minutes=61, seconds=61))
])
def test_iso_constructor(fmt, exp):
assert Timedelta(fmt) == exp
@pytest.mark.parametrize('fmt', [
'PPPPPPPPPPPP', 'PDTHMS', 'P0DT999H999M999S',
'P1DT0H0M0.0000000000000S', 'P1DT0H0M00000000000S',
'P1DT0H0M0.S'])
def test_iso_constructor_raises(fmt):
with tm.assert_raises_regex(ValueError, 'Invalid ISO 8601 Duration '
'format - {}'.format(fmt)):
Timedelta(fmt)
@pytest.mark.parametrize('constructed_td, conversion', [
(Timedelta(nanoseconds=100), '100ns'),
(Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1, milliseconds=1,
microseconds=1, nanoseconds=1), 694861001001001),
(Timedelta(microseconds=1) + Timedelta(nanoseconds=1), '1us1ns'),
(Timedelta(microseconds=1) - Timedelta(nanoseconds=1), '999ns'),
(Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2), '990ns')])
def test_td_constructor_on_nanoseconds(constructed_td, conversion):
# GH#9273
assert constructed_td == Timedelta(conversion)
def test_td_constructor_value_error():
with pytest.raises(TypeError):
Timedelta(nanoseconds='abc')
| bsd-3-clause |
positiveban/positiveban.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run it from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, url_slug, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
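#
# A single row of such a TSV might look like the following hypothetical
# example (fields are tab-separated; tabs are rendered as wide gaps here):
#
# pub_date    title        venue      excerpt          citation                                        url_slug              paper_url
# 2015-10-01  Paper Title  Journal 1  A short teaser.  Your Name. (2015). "Paper Title." Journal 1.    paper-title-number-1  http://example.com/paper1.pdf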
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), delete or comment out the corresponding lines below.
# In[5]:
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| mit |
jereze/scikit-learn | sklearn/utils/estimator_checks.py | 31 | 52862 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils import ConvergenceWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
    # Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
name = Estimator.__class__.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
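# A minimal usage sketch: running the common checks against a single estimator
# class.  LinearDiscriminantAnalysis (already imported above) is an arbitrary
# choice; any class following the scikit-learn fit/predict conventions could
# be passed instead.
def _example_check_estimator():  # illustration only
    check_estimator(LinearDiscriminantAnalysis)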
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
    # check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check fitting a 2d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check fitting a 2d array with only 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting a 1d array with a single-sample y
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
    except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# one-vs-one of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset; the default n_iter is unlikely to be
# enough for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
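# Illustrative sketch of the hash-based comparison above (assuming ``hash`` is
# joblib's content hash, as the comment in the loop states): unlike ``==``, it
# catches in-place mutation of array-valued constructor parameters.
#   >>> param_before = np.zeros(3)
#   >>> param_after = param_before.copy()
#   >>> hash(param_before) == hash(param_after)
#   True
#   >>> param_after[0] = 1.0            # estimator mutated its own parameter
#   >>> hash(param_before) == hash(param_after)
#   False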
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self'
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
# Estimators in mono_output_task_error raise ValueError if y is 1-D
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
# Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
hrjn/scikit-learn | sklearn/utils/extmath.py | 19 | 27505 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse, csr_matrix
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
from ..exceptions import NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
if not isinstance(X, csr_matrix):
X = csr_matrix(X)
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
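# Illustrative sketch (uses only numpy and scipy.sparse already imported above):
# row_norms matches the naive dense expression from the docstring and accepts
# CSR input directly.
#   >>> X = np.arange(6, dtype=np.float64).reshape(2, 3)
#   >>> np.allclose(row_norms(X), np.sqrt((X * X).sum(axis=1)))
#   True
#   >>> np.allclose(row_norms(csr_matrix(X)), row_norms(X))
#   True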
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
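# Illustrative sketch: for a positive definite matrix fast_logdet agrees with
# log(det(A)); for a matrix with non-positive determinant it returns -inf.
#   >>> A = np.array([[2., 0.], [0., 3.]])
#   >>> np.allclose(fast_logdet(A), np.log(np.linalg.det(A)))
#   True
#   >>> fast_logdet(np.array([[1., 0.], [0., -1.]]))
#   -inf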
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Falling back to np.dot. '
'Data must be of same type of either '
'32 or 64 bit float for the BLAS function, gemm, to be '
'used for an efficient dot operation. ',
NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.exceptions import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
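# Illustrative sketch: sparse and dense operands are handled transparently,
# and dense_output=True guarantees a plain ndarray result.
#   >>> A = csr_matrix(np.eye(3))
#   >>> B = np.arange(9.).reshape(3, 3)
#   >>> np.allclose(safe_sparse_dot(A, B, dense_output=True), B)
#   True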
def randomized_range_finder(A, size, n_iter,
power_iteration_normalizer='auto',
random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A : 2D array
The input data matrix
size : integer
Size of the return array
n_iter : integer
Number of power iterations used to stabilize the result
power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
random_state : RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q : 2D array
A (A.shape[0] x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
# Generating normal random vectors with shape: (A.shape[1], size)
Q = random_state.normal(size=(A.shape[1], size))
# Deal with "auto" mode
if power_iteration_normalizer == 'auto':
if n_iter <= 2:
power_iteration_normalizer = 'none'
else:
power_iteration_normalizer = 'LU'
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of A in Q
for i in range(n_iter):
if power_iteration_normalizer == 'none':
Q = safe_sparse_dot(A, Q)
Q = safe_sparse_dot(A.T, Q)
elif power_iteration_normalizer == 'LU':
Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
elif power_iteration_normalizer == 'QR':
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')
# Sample the range of A using a linear projection of Q
# Extract an orthonormal basis
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
return Q
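# Illustrative sketch: the returned Q has shape (A.shape[0], size) with
# orthonormal columns, and its span approximates the range of A.
#   >>> rng = np.random.RandomState(0)
#   >>> A = rng.normal(size=(100, 20))
#   >>> Q = randomized_range_finder(A, size=10, n_iter=4, random_state=0)
#   >>> Q.shape
#   (100, 10)
#   >>> np.allclose(np.dot(Q.T, Q), np.eye(10))
#   True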
def randomized_svd(M, n_components, n_oversamples=10, n_iter='auto',
power_iteration_normalizer='auto', transpose='auto',
flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M : ndarray or sparse matrix
Matrix to decompose
n_components : int
Number of singular values and vectors to extract.
n_oversamples : int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values.
n_iter : int or 'auto' (default is 'auto')
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
(< .1 * min(X.shape)), in which case `n_iter` is set to 7.
This improves precision with few components.
.. versionchanged:: 0.18
power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
transpose : True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case.
.. versionchanged:: 0.18
flip_sign : boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision).
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if n_iter == 'auto':
# Checks if the number of iterations is explicitly specified
# Adjust n_iter. 7 was found to be a good compromise for PCA. See #5299
n_iter = 7 if n_components < .1 * min(M.shape) else 4
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter,
power_iteration_normalizer, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
if not transpose:
U, V = svd_flip(U, V)
else:
# In case of transpose u_based_decision=false
# to actually flip based on u and not v.
U, V = svd_flip(U, V, u_based_decision=False)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
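# Illustrative sketch: on an exactly low-rank matrix the truncated randomized
# SVD recovers the leading singular values of the exact SVD.
#   >>> rng = np.random.RandomState(0)
#   >>> M = np.dot(rng.normal(size=(50, 5)), rng.normal(size=(5, 30)))  # rank 5
#   >>> U, s, V = randomized_svd(M, n_components=5, random_state=0)
#   >>> np.allclose(s, linalg.svd(M, compute_uv=False)[:5])
#   True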
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
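# Illustrative sketch: the sign flips are applied consistently to the columns
# of u and the rows of v, so the reconstruction np.dot(u * s, v) is unchanged
# while the output becomes deterministic.
#   >>> rng = np.random.RandomState(0)
#   >>> u, s, v = linalg.svd(rng.normal(size=(6, 4)), full_matrices=False)
#   >>> u2, v2 = svd_flip(u.copy(), v.copy())
#   >>> np.allclose(np.dot(u * s, v), np.dot(u2 * s, v2))
#   True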
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X : array-like, shape (M, N) or (M, )
Argument to the logistic function
out : array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out : array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
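# Illustrative sketch: the piecewise form stays finite for large negative
# inputs, where the naive expression log(1 / (1 + exp(-x))) overflows to -inf.
#   >>> x = np.array([-1000., 0., 1000.])
#   >>> np.isfinite(log_logistic(x)).all()
#   True
#   >>> np.isfinite(np.log(1. / (1. + np.exp(-x)))).all()   # naive form fails
#   False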
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X : array-like, shape (M, N)
Argument to the logistic function
copy : bool, optional
Copy X or not.
Returns
-------
out : array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
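# Illustrative sketch: subtracting the per-row maximum keeps the exponentials
# finite even for very large inputs, and every row sums to one.
#   >>> logits = np.array([[1000., 1001., 1002.]])
#   >>> np.isfinite(softmax(logits)).all()
#   True
#   >>> np.allclose(softmax(logits).sum(axis=1), 1.0)
#   True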
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
last_sample_count=0):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
function. Both must be initialized to 0.0. In case no scaling is required
last_variance can be None. The mean is always required and returned because
it is necessary for the calculation of the variance. last_sample_count is the
number of samples encountered so far.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
variance: recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = X.sum(axis=0)
new_sample_count = X.shape[0]
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = X.var(axis=0) * new_sample_count
if last_sample_count == 0: # Avoid division by 0
updated_unnormalized_variance = new_unnormalized_variance
else:
last_over_new_count = last_sample_count / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance +
new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
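# Illustrative sketch: processing the data in two chunks reproduces the batch
# mean and variance (np.var with its default ddof=0).
#   >>> X = np.random.RandomState(0).rand(20, 3)
#   >>> m, v, n = _incremental_mean_and_var(X[:12], 0., np.zeros(3), 0)
#   >>> m, v, n = _incremental_mean_and_var(X[12:], m, v, n)
#   >>> np.allclose(m, X.mean(axis=0)) and np.allclose(v, X.var(axis=0)) and n == 20
#   True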
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum
Parameters
----------
arr : array-like
To be cumulatively summed as flat
axis : int, optional
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float
Relative tolerance, see ``np.allclose``
atol : float
Absolute tolerance, see ``np.allclose``
"""
# sum is as unstable as cumsum for numpy < 1.9
if np_version < (1, 9):
return np.cumsum(arr, axis=axis, dtype=np.float64)
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.all(np.isclose(out.take(-1, axis=axis), expected, rtol=rtol,
atol=atol, equal_nan=True)):
warnings.warn('cumsum was found to be unstable: '
'its last element does not correspond to sum',
RuntimeWarning)
return out
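# Illustrative sketch: float32 input is accumulated in float64, so the final
# cumulative value matches the float64 sum of the array.
#   >>> r = np.full(100000, 1e-4, dtype=np.float32)
#   >>> out = stable_cumsum(r)
#   >>> out.dtype == np.float64
#   True
#   >>> np.isclose(out[-1], np.sum(r, dtype=np.float64))
#   True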
| bsd-3-clause |
dgasmith/SICM2-Software-Summer-School-2014 | Useful_Resources/Python_Scripts/extract_molpro_rks.py | 1 | 1073 | # This is in the public domain.
# Created by Daniel Smith 7/10/14
import glob
import pandas as pd
infiles = glob.glob('Data/*.out')
def read(infile):
"""Read 'infile' into a list where each element is a line of the file"""
return open(infile, 'r').readlines()
def find(data, string, position=False, dtype=float):
"""Looks for the pattern 'string' in data"""
fdata = filter(lambda x: string in x,data)
if position is False:
return list(fdata)
else:
return [dtype(x.split()[position]) for x in fdata]
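# Illustrative sketch of find() (the sample line below is hypothetical): with
# position=-1 the last whitespace-separated field of every matching line is
# converted to float, which is how the RKS energies are pulled out below.
#   >>> lines = ['!RKS STATE 1.1 Energy  -76.12345\n', 'some other line\n']
#   >>> find(lines, '!RKS STATE 1.1 Energy', -1)
#   [-76.12345]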
output = []
for inp in infiles:
#Grab systematic indices from the filename
name = inp.split('/')[-1].split('+')
index = name[1].split('_')
#Grab RKS energies
data = read(inp)
energies = find(data, '!RKS STATE 1.1 Energy', -1)
energy = (energies[0] - energies[1] - energies[2])*627.509
#append to output
output.append(index + [energy])
#Create and print pandas DataFrame
output = pd.DataFrame(output)
output.columns = ['r','d','theta','x','y','z','BLYP/QZVP']
print output
| mit |
Tejas-Khot/deep-learning | extreme/test_conv_destin.py | 2 | 8185 | __author__ = 'mong'
__author__ = 'teddy'
from destin.network import *
from destin.load_data import *
import cPickle as pickle
from time import time
from sklearn import svm
import os
# *****Define Parameters for the Network and nodes
# Network Params
num_layers = 4
patch_mode = 'Adjacent'
image_type = 'Color'
network_mode = True
# For a Node: specify Your Algorithm Choice and Corresponding parameters
# ******************************************************************************************
#
# Incremental Clustering
#
num_nodes_per_layer = [[8, 8], [4, 4], [2, 2], [1, 1]]
num_cents_per_layer = [75, 75, 50 ,25]
pool_size = [(1,1),(1,1),(1,1),(1,1)] #pooling size: the first number is how many belief
#vectors are pooled together. For example, (64,1) pools every
#vector in the first layer, (16,1) divides the first layer into
#4 quarters and pools each of them, and (4,1) divides the first
#layer into 16 pieces and pools each of them.
print "Uniform DeSTIN with Clustering"
algorithm_choice = 'Clustering'
alg_params = {'mr': 0.01, 'vr': 0.01, 'sr': 0.001, 'DIMS': [],
'CENTS': [], 'node_id': [],
'num_cents_per_layer': num_cents_per_layer}
# ******************************************************************************************
"""
# ******************************************************************************************
# Hierarchy Of AutoEncoders
print "Uniform DeSTIN with AutoEncoders"
num_nodes_per_layer = [[8, 8], [4, 4], [2, 2], [1, 1]]
num_cents_per_layer = [36, 36, 36, 36]
algorithm_choice = 'AutoEncoder'
inp_size = 48
hid_size = 100
alg_params = [[inp_size, hid_size], [4 * hid_size, hid_size],
[4 * hid_size, hid_size], [4 * hid_size, hid_size]]
# ******************************************************************************************
"""
"""
#Load Data: loadCifar(10) loads all 5 training batches (50,000 images in total);
# 1 to 5 load batch_1 to batch_5 training images individually
[data, labels] = loadCifar(10)
del labels
# Declare a Network Object and load Training Data
cifar_stat = load_cifar(4)
DESTIN = Network(
num_layers, algorithm_choice, alg_params, num_nodes_per_layer, cifar_stat, patch_mode, image_type,)
#, , , , cifar_stat, patch_mode='Adjacent', image_type='Color'
DESTIN.setmode(network_mode)
DESTIN.set_lowest_layer(0)
# Load Data
# Modify the location of the training data in file "load_data.py"
# data = np.random.rand(5,32*32*3)
# Initialize Network; there is is also a layer-wise initialization option
DESTIN.init_network()
#Train the Network
print "DeSTIN Training/with out Feature extraction"
for epoch in range(5):
for I in range(data.shape[0]): # For Every image in the data set
if I % 10000 == 0:
print("Training Iteration Number %d" % I)
for L in range(DESTIN.number_of_layers):
if L == 0:
img = data[I][:].reshape(32, 32, 3)
# This is equivalent to sharing centroids or kernels
DESTIN.layers[0][L].load_input(img, [4, 4])
DESTIN.layers[0][L].do_layer_learning()
#DESTIN.layers[0][L].shared_learning()
else:
DESTIN.layers[0][L].load_input(
DESTIN.layers[0][L - 1].nodes, [2, 2])
DESTIN.layers[0][L].do_layer_learning()
#DESTIN.layers[0][L].shared_learning()
print "Epoch = " + str(epoch+1)
pickle.dump( DESTIN, open( "DESTIN_conv_[75, 75, 50 ,25]", "wb" ) )
print "done"
"""
DESTIN=pickle.load( open( "DESTIN_conv_[75, 75, 50 ,25]", "rb" ) )
print("DesTIN running/Feature Extraction/ over the Training Data")
network_mode = False
DESTIN.setmode(network_mode)
# Testin it over the training set
[data, labels] = loadCifar(10)
del labels
if not os.path.exists('train'):
os.makedirs('train')
for I in range(data.shape[0]): # For Every image in the data set
if I % 1000 == 0:
print("Testing Iteration Number %d" % I)
for L in range(DESTIN.number_of_layers):
if L == 0:
img = data[I][:].reshape(32, 32, 3)
DESTIN.layers[0][L].load_input(img, [4, 4])
DESTIN.layers[0][L].do_layer_learning()
else:
DESTIN.layers[0][L].load_input(
DESTIN.layers[0][L - 1].nodes, [2, 2])
DESTIN.layers[0][L].do_layer_learning()
DESTIN.update_belief_exporter(pool_size, True ,'average_exc_pad') #( maxpool_shape , ignore_border, mode)
if I in range(199, 50999, 200):
Name = 'train/' + str(I + 1) + '.txt'
#file_id = open(Name, 'w')
np.savetxt(Name, np.array(DESTIN.network_belief['belief']))
#file_id.close()
# Get rid-off accumulated training beliefs
DESTIN.clean_belief_exporter()
print("Feature Extraction with the test set")
[data, labels] = loadCifar(6)
del labels
if not os.path.exists('test'):
os.makedirs('test')
for I in range(data.shape[0]): # For Every image in the data set
if I % 1000 == 0:
print("Testing Iteration Number %d" % (I+50000))
for L in range(DESTIN.number_of_layers):
if L == 0:
img = data[I][:].reshape(32, 32, 3)
DESTIN.layers[0][L].load_input(img, [4, 4])
DESTIN.layers[0][L].do_layer_learning() # Calculates belief for
else:
DESTIN.layers[0][L].load_input(
DESTIN.layers[0][L - 1].nodes, [2, 2])
DESTIN.layers[0][L].do_layer_learning()
DESTIN.update_belief_exporter(pool_size, True ,'average_exc_pad')
if I in range(199, 10199, 200):
Name = 'test/' + str(I + 1) + '.txt'
np.savetxt(Name, np.array(DESTIN.network_belief['belief']))
# Get rid-off accumulated training beliefs
DESTIN.clean_belief_exporter()
print "Training With SVM"
print("Loading training and test labels")
[trainData, trainLabel] = loadCifar(10)
del trainData
[testData, testLabel] = loadCifar(6)
del testData
# Load Training and Test Data/Extracted from DeSTIN
# here we do not use the whole set of feature extracted from DeSTIN
# We use the features which are extracted from the top few layers
print("Loading training and testing features")
I = 199
Name = 'train/' + str(I + 1) + '.txt'
trainData = np.ravel(np.loadtxt(Name))
for I in range(399, 50000, 200):
Name = 'train/' + str(I + 1) + '.txt'
file_id = open(Name, 'r')
Temp = np.ravel(np.loadtxt(Name))
trainData = np.hstack((trainData, Temp))
del Temp
Len = np.shape(trainData)[0]
Size = np.size(trainData)
Width = Len/50000
print Len
print Width*50000
trainData = trainData.reshape((50000, Width))
# Training SVM
SVM = svm.LinearSVC(C=1)
# C=100, kernel='rbf')
print "Training the SVM"
trainLabel = np.squeeze(np.asarray(trainLabel).reshape(50000, 1))
#print trainData
SVM.fit(trainData, trainLabel)
print("Training Score = %f " % float(100 * SVM.score(trainData, trainLabel)))
#print("Training Accuracy = %f" % (SVM.score(trainData, trainLabel) * 100))
eff = {}
eff['train'] = SVM.score(trainData, trainLabel) * 100
del trainData
testData = np.array([])
print("Loading training and testing features")
I = 399
Name = 'test/' + str(I + 1) + '.txt'
testData = np.ravel(np.loadtxt(Name))
for I in range(599, 10000, 200):
Name = 'test/' + str(I + 1) + '.txt'
file_id = open(Name, 'r')
Temp = np.ravel(np.loadtxt(Name))
testData = np.hstack((testData, Temp))
del Temp
Len = np.shape(testData)[0]
Size = np.size(testData)
I = 399
Name = 'test/' + str(I + 1) + '.txt'
testData1 = np.ravel(np.loadtxt(Name))
print np.shape(testData1)[0]/200.0
Width = Len // 9800  # integer width so the reshape below gets an int
print Len
print Size
testData = testData.reshape((9800, Width))
print "Predicting Test samples"
print("Test Score = %f" % float(100 * SVM.score(testData, testLabel[200:10000])))
#print("Training Accuracy = %f" % (SVM.score(testData, testLabel) * 100))
eff['test'] = SVM.score(testData, testLabel[200:10000]) * 100
io.savemat('accuracy.mat', eff)
| gpl-2.0 |
lazywei/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 129 | 7848 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
hlin117/statsmodels | statsmodels/graphics/dotplots.py | 31 | 18190 | import numpy as np
from statsmodels.compat import range
from . import utils
def dot_plot(points, intervals=None, lines=None, sections=None,
styles=None, marker_props=None, line_props=None,
split_names=None, section_order=None, line_order=None,
stacked=False, styles_order=None, striped=False,
horizontal=True, show_names="both",
fmt_left_name=None, fmt_right_name=None,
show_section_titles=None, ax=None):
"""
Produce a dotplot similar in style to those in Cleveland's
"Visualizing Data" book. These are also known as "forest plots".
Parameters
----------
points : array_like
The quantitative values to be plotted as markers.
intervals : array_like
The intervals to be plotted around the points. The elements
of `intervals` are either scalars or sequences of length 2. A
scalar indicates the half width of a symmetric interval. A
sequence of length 2 contains the left and right half-widths
(respectively) of a nonsymmetric interval. If None, no
intervals are drawn.
lines : array_like
A grouping variable indicating which points/intervals are
drawn on a common line. If None, each point/interval appears
on its own line.
sections : array_like
A grouping variable indicating which lines are grouped into
sections. If None, everything is drawn in a single section.
styles : array_like
A grouping label defining the plotting style of the markers
and intervals.
marker_props : dict
A dictionary mapping style codes (the values in `styles`) to
dictionaries defining key/value pairs to be passed as keyword
arguments to `plot` when plotting markers. Useful keyword
arguments are "color", "marker", and "ms" (marker size).
line_props : dict
A dictionary mapping style codes (the values in `styles`) to
dictionaries defining key/value pairs to be passed as keyword
arguments to `plot` when plotting interval lines. Useful
keyword arguments are "color", "linestyle", "solid_capstyle",
and "linewidth".
split_names : string
If not None, this is used to split the values of `lines` into
substrings that are drawn in the left and right margins,
respectively. If None, the values of `lines` are drawn in the
left margin.
section_order : array_like
The section labels in the order in which they appear in the
dotplot.
line_order : array_like
The line labels in the order in which they appear in the
dotplot.
stacked : boolean
If True, when multiple points or intervals are drawn on the
same line, they are offset from each other.
styles_order : array_like
If stacked=True, this is the order in which the point styles
on a given line are drawn from top to bottom (if horizontal
        is True) or from left to right (if horizontal is False). If
None (default), the order is lexical.
striped : boolean
If True, every other line is enclosed in a shaded box.
horizontal : boolean
If True (default), the lines are drawn horizontally, otherwise
they are drawn vertically.
show_names : string
Determines whether labels (names) are shown in the left and/or
        right margins (top/bottom margins if `horizontal` is False).
If `both`, labels are drawn in both margins, if 'left', labels
are drawn in the left or top margin. If `right`, labels are
drawn in the right or bottom margin.
fmt_left_name : function
The left/top margin names are passed through this function
before drawing on the plot.
fmt_right_name : function
        The right/bottom margin names are passed through this function
before drawing on the plot.
show_section_titles : bool or None
If None, section titles are drawn only if there is more than
one section. If False/True, section titles are never/always
drawn, respectively.
ax : matplotlib.axes
The axes on which the dotplot is drawn. If None, a new axes
is created.
Returns
-------
fig : Figure
The figure given by `ax.figure` or a new instance.
Notes
-----
`points`, `intervals`, `lines`, `sections`, `styles` must all have
the same length whenever present.
Examples
--------
This is a simple dotplot with one point per line:
>>> dot_plot(points=point_values)
This dotplot has labels on the lines (if elements in
`label_values` are repeated, the corresponding points appear on
the same line):
>>> dot_plot(points=point_values, lines=label_values)
References
----------
* Cleveland, William S. (1993). "Visualizing Data". Hobart
Press.
* Jacoby, William G. (2006) "The Dot Plot: A Graphical Display
for Labeled Quantitative Values." The Political Methodologist
14(1): 6-14.
"""
import matplotlib.transforms as transforms
fig, ax = utils.create_mpl_ax(ax)
# Convert to numpy arrays if that is not what we are given.
points = np.asarray(points)
asarray_or_none = lambda x : None if x is None else np.asarray(x)
intervals = asarray_or_none(intervals)
lines = asarray_or_none(lines)
sections = asarray_or_none(sections)
styles = asarray_or_none(styles)
# Total number of points
npoint = len(points)
# Set default line values if needed
if lines is None:
lines = np.arange(npoint)
# Set default section values if needed
if sections is None:
sections = np.zeros(npoint)
# Set default style values if needed
if styles is None:
styles = np.zeros(npoint)
# The vertical space (in inches) for a section title
section_title_space = 0.5
# The number of sections
nsect = len(set(sections))
if section_order is not None:
nsect = len(set(section_order))
# The number of section titles
if show_section_titles == False:
draw_section_titles = False
nsect_title = 0
elif show_section_titles == True:
draw_section_titles = True
nsect_title = nsect
else:
draw_section_titles = nsect > 1
nsect_title = nsect if nsect > 1 else 0
# The total vertical space devoted to section titles.
section_space_total = section_title_space * nsect_title
# Add a bit of room so that points that fall at the axis limits
# are not cut in half.
ax.set_xmargin(0.02)
ax.set_ymargin(0.02)
if section_order is None:
lines0 = list(set(sections))
lines0.sort()
else:
lines0 = section_order
if line_order is None:
lines1 = list(set(lines))
lines1.sort()
else:
lines1 = line_order
# A map from (section,line) codes to index positions.
lines_map = {}
for i in range(npoint):
if section_order is not None and sections[i] not in section_order:
continue
if line_order is not None and lines[i] not in line_order:
continue
ky = (sections[i], lines[i])
if ky not in lines_map:
lines_map[ky] = []
lines_map[ky].append(i)
# Get the size of the axes on the parent figure in inches
bbox = ax.get_window_extent().transformed(
fig.dpi_scale_trans.inverted())
awidth, aheight = bbox.width, bbox.height
# The number of lines in the plot.
nrows = len(lines_map)
# The positions of the lowest and highest guideline in axes
# coordinates (for horizontal dotplots), or the leftmost and
# rightmost guidelines (for vertical dotplots).
bottom, top = 0, 1
if horizontal:
# x coordinate is data, y coordinate is axes
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
else:
# x coordinate is axes, y coordinate is data
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
# Space used for a section title, in axes coordinates
title_space_axes = section_title_space / aheight
# Space between lines
if horizontal:
dpos = (top - bottom - nsect_title*title_space_axes) /\
float(nrows)
else:
dpos = (top - bottom) / float(nrows)
# Determine the spacing for stacked points
if styles_order is not None:
style_codes = styles_order
else:
style_codes = list(set(styles))
style_codes.sort()
# Order is top to bottom for horizontal plots, so need to
# flip.
if horizontal:
style_codes = style_codes[::-1]
# nval is the maximum number of points on one line.
nval = len(style_codes)
if nval > 1:
stackd = dpos / (2.5*(float(nval)-1))
else:
stackd = 0.
# Map from style code to its integer position
#style_codes_map = {x: style_codes.index(x) for x in style_codes}
# python 2.6 compat version:
style_codes_map = dict((x, style_codes.index(x)) for x in style_codes)
# Setup default marker styles
colors = ["r", "g", "b", "y", "k", "purple", "orange"]
if marker_props is None:
#marker_props = {x: {} for x in style_codes}
# python 2.6 compat version:
marker_props = dict((x, {}) for x in style_codes)
for j in range(nval):
sc = style_codes[j]
if "color" not in marker_props[sc]:
marker_props[sc]["color"] = colors[j % len(colors)]
if "marker" not in marker_props[sc]:
marker_props[sc]["marker"] = "o"
if "ms" not in marker_props[sc]:
marker_props[sc]["ms"] = 10 if stackd == 0 else 6
# Setup default line styles
if line_props is None:
#line_props = {x: {} for x in style_codes}
# python 2.6 compat version:
line_props = dict((x, {}) for x in style_codes)
for j in range(nval):
sc = style_codes[j]
if "color" not in line_props[sc]:
line_props[sc]["color"] = "grey"
if "linewidth" not in line_props[sc]:
line_props[sc]["linewidth"] = 2 if stackd > 0 else 8
if horizontal:
# The vertical position of the first line.
pos = top - dpos/2 if nsect == 1 else top
else:
# The horizontal position of the first line.
pos = bottom + dpos/2
# Points that have already been labeled
labeled = set()
# Positions of the y axis grid lines
ticks = []
# Loop through the sections
for k0 in lines0:
# Draw a section title
if draw_section_titles:
if horizontal:
y0 = pos + dpos/2 if k0 == lines0[0] else pos
ax.fill_between((0, 1), (y0,y0),
(pos-0.7*title_space_axes,
pos-0.7*title_space_axes),
color='darkgrey',
transform=ax.transAxes,
zorder=1)
txt = ax.text(0.5, pos - 0.35*title_space_axes, k0,
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes)
txt.set_fontweight("bold")
pos -= title_space_axes
else:
m = len([k for k in lines_map if k[0] == k0])
ax.fill_between((pos-dpos/2+0.01,
pos+(m-1)*dpos+dpos/2-0.01),
(1.01,1.01), (1.06,1.06),
color='darkgrey',
transform=ax.transAxes,
zorder=1, clip_on=False)
txt = ax.text(pos + (m-1)*dpos/2, 1.02, k0,
horizontalalignment='center',
verticalalignment='bottom',
transform=ax.transAxes)
txt.set_fontweight("bold")
jrow = 0
for k1 in lines1:
# No data to plot
if (k0, k1) not in lines_map:
continue
# Draw the guideline
if horizontal:
ax.axhline(pos, color='grey')
else:
ax.axvline(pos, color='grey')
# Set up the labels
if split_names is not None:
us = k1.split(split_names)
if len(us) >= 2:
left_label, right_label = us[0], us[1]
else:
left_label, right_label = k1, None
else:
left_label, right_label = k1, None
if fmt_left_name is not None:
left_label = fmt_left_name(left_label)
if fmt_right_name is not None:
right_label = fmt_right_name(right_label)
# Draw the stripe
if striped and jrow % 2 == 0:
if horizontal:
ax.fill_between((0, 1), (pos-dpos/2, pos-dpos/2),
(pos+dpos/2, pos+dpos/2),
color='lightgrey',
transform=ax.transAxes,
zorder=0)
else:
ax.fill_between((pos-dpos/2, pos+dpos/2),
(0, 0), (1, 1),
color='lightgrey',
transform=ax.transAxes,
zorder=0)
jrow += 1
# Draw the left margin label
if show_names.lower() in ("left", "both"):
if horizontal:
ax.text(-0.1/awidth, pos, left_label,
horizontalalignment="right",
verticalalignment='center',
transform=ax.transAxes,
family='monospace')
else:
ax.text(pos, -0.1/aheight, left_label,
horizontalalignment="center",
verticalalignment='top',
transform=ax.transAxes,
family='monospace')
# Draw the right margin label
if show_names.lower() in ("right", "both"):
if right_label is not None:
if horizontal:
ax.text(1 + 0.1/awidth, pos, right_label,
horizontalalignment="left",
verticalalignment='center',
transform=ax.transAxes,
family='monospace')
else:
ax.text(pos, 1 + 0.1/aheight, right_label,
horizontalalignment="center",
verticalalignment='bottom',
transform=ax.transAxes,
family='monospace')
# Save the vertical position so that we can place the
# tick marks
ticks.append(pos)
# Loop over the points in one line
for ji,jp in enumerate(lines_map[(k0,k1)]):
# Calculate the vertical offset
yo = 0
if stacked:
yo = -dpos/5 + style_codes_map[styles[jp]]*stackd
pt = points[jp]
# Plot the interval
if intervals is not None:
# Symmetric interval
if np.isscalar(intervals[jp]):
lcb, ucb = pt - intervals[jp],\
pt + intervals[jp]
# Nonsymmetric interval
else:
lcb, ucb = pt - intervals[jp][0],\
pt + intervals[jp][1]
# Draw the interval
if horizontal:
ax.plot([lcb, ucb], [pos+yo, pos+yo], '-',
transform=trans,
**line_props[styles[jp]])
else:
ax.plot([pos+yo, pos+yo], [lcb, ucb], '-',
transform=trans,
**line_props[styles[jp]])
# Plot the point
sl = styles[jp]
sll = sl if sl not in labeled else None
labeled.add(sl)
if horizontal:
ax.plot([pt,], [pos+yo,], ls='None',
transform=trans, label=sll,
**marker_props[sl])
else:
ax.plot([pos+yo,], [pt,], ls='None',
transform=trans, label=sll,
**marker_props[sl])
if horizontal:
pos -= dpos
else:
pos += dpos
# Set up the axis
if horizontal:
ax.xaxis.set_ticks_position("bottom")
ax.yaxis.set_ticks_position("none")
ax.set_yticklabels([])
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_position(('axes', -0.1/aheight))
ax.set_ylim(0, 1)
ax.yaxis.set_ticks(ticks)
ax.autoscale_view(scaley=False, tight=True)
else:
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("none")
ax.set_xticklabels([])
ax.spines['bottom'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_position(('axes', -0.1/awidth))
ax.set_xlim(0, 1)
ax.xaxis.set_ticks(ticks)
ax.autoscale_view(scalex=False, tight=True)
return fig
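# Added usage sketch (illustrative only; the data below are made up and the
# only assumption is the dot_plot function defined above):
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    example_points = np.array([1.5, 2.3, 0.8, 3.1])
    example_labels = ["alpha", "beta", "gamma", "delta"]   # one line per point
    example_intervals = np.array([0.2, 0.4, 0.1, 0.3])     # symmetric half-widths
    fig = dot_plot(example_points, intervals=example_intervals,
                   lines=example_labels)
    plt.show()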
| bsd-3-clause |
Jimmy-Morzaria/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
hainm/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
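# Added note (illustration, not part of the original tests): the hard-coded
# matrices above are simply the per-class sample covariances of the toy data,
# e.g. for class 1
#   np.cov(X[y == 1].T)   # ddof=1 by default -> [[0.7, 0.45], [0.45, 0.7]]
# so the expected values can be re-derived directly from X and y.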
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
probcomp/bdbcontrib | src/bql_utils.py | 1 | 16314 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import bayeslite.core
from bayeslite import bayesdb_open
from bayeslite import bql_quote_name as quote
from bayeslite.exception import BayesLiteException as BLE
from bayeslite.loggers import logged_query
from bayeslite.read_pandas import bayesdb_read_pandas_df
from bayeslite.sqlite3_util import sqlite3_quote_name
from bayeslite.util import cursor_value
from bdbcontrib.population_method import population_method
###############################################################################
### PUBLIC ###
###############################################################################
@population_method(population_to_bdb=0, population_name=1)
def cardinality(bdb, table, cols=None):
"""Compute the number of unique values in the columns of a table.
Parameters
----------
bdb : __population_to_bdb__
table : __population_name__
Name of table.
cols : list<str>, optional
Columns to compute the unique values. Defaults to all.
Returns
-------
counts : pandas.DataFrame whose .columns are ['name', 'distinct_count'].
"""
# If no columns specified, use all.
if not cols:
sql = 'PRAGMA table_info(%s)' % (quote(table),)
res = bdb.sql_execute(sql)
cols = [r[1] for r in res]
    names = []
    counts = []
for col in cols:
sql = '''
SELECT COUNT (DISTINCT %s) FROM %s
''' % (quote(col), quote(table))
res = bdb.sql_execute(sql)
names.append(col)
counts.append(cursor_value(res))
return pd.DataFrame({'name': names, 'distinct_count': counts})
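# Added note (illustrative; the table and column names below are hypothetical):
# cardinality() issues one COUNT(DISTINCT ...) per column, so a single ad-hoc
# check could equally be written as
#   bdb.sql_execute('SELECT COUNT(DISTINCT "age") FROM "employees"')
# the helper simply loops that query over the columns and returns a DataFrame.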
@population_method(population_to_bdb=0, population_name=1)
def nullify(bdb, table, value):
"""Replace specified values in a SQL table with ``NULL``.
Parameters
----------
bdb : __population_to_bdb__
table : str
The name of the table on which to act
value : stringable
The value to replace with ``NULL``
Examples
--------
>>> import bayeslite
>>> from bdbcontrib import plotutils
>>> with bayeslite.bayesdb_open('mydb.bdb') as bdb:
>>> bdbcontrib.nullify(bdb, 'mytable', 'NaN')
"""
# get a list of columns of the table
c = bdb.sql_execute('pragma table_info({})'.format(quote(table)))
columns = [r[1] for r in c]
for col in columns:
if value in ["''", '""']:
bql = '''
UPDATE {} SET {} = NULL WHERE {} = '';
'''.format(quote(table), quote(col), quote(col))
bdb.sql_execute(bql)
else:
bql = '''
UPDATE {} SET {} = NULL WHERE {} = ?;
'''.format(quote(table), quote(col), quote(col))
bdb.sql_execute(bql, (value,))
def cursor_to_df(cursor):
"""Converts SQLite3 cursor to a pandas DataFrame."""
# Do this in a savepoint to enable caching from row to row in BQL
# queries.
with cursor.connection.savepoint():
df = pd.DataFrame.from_records(cursor, coerce_float=True)
if not df.empty:
df.columns = [desc[0] for desc in cursor.description]
for col in df.columns:
try:
df[col] = df[col].astype(float)
except ValueError:
pass
return df
def table_to_df(bdb, table_name, column_names=None):
"""Return the contents of the given table as a pandas DataFrame.
If `column_names` is not None, fetch only those columns.
"""
qt = sqlite3_quote_name(table_name)
if column_names is not None:
qcns = ','.join(map(sqlite3_quote_name, column_names))
select_sql = 'SELECT %s FROM %s' % (qcns, qt)
else:
select_sql = 'SELECT * FROM %s' % (qt,)
return cursor_to_df(bdb.sql_execute(select_sql))
def df_to_table(df, tablename=None, **kwargs):
"""Return a new BayesDB with a single table with the data in `df`.
`df` is a Pandas DataFrame.
If `tablename` is not supplied, an arbitrary one will be chosen.
`kwargs` are passed on to `bayesdb_open`.
Returns a 2-tuple of the new BayesDB instance and the name of the
new table.
"""
bdb = bayesdb_open(**kwargs)
if tablename is None:
tablename = bdb.temp_table_name()
bayesdb_read_pandas_df(bdb, tablename, df, create=True)
return (bdb, tablename)
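# Added round-trip sketch (illustrative; `my_df` is a hypothetical pandas
# DataFrame and no extra bayesdb_open arguments are assumed):
#   bdb, name = df_to_table(my_df)       # fresh BayesDB instance + new table
#   df_again = table_to_df(bdb, name)    # read the same rows back out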
@population_method(population_to_bdb=0, interpret_bql=1, logger="logger")
def query(bdb, bql, bindings=None, logger=None):
"""Execute the `bql` query on the `bdb` instance.
Parameters
----------
bdb : __population_to_bdb__
bql : __interpret_bql__
bindings : Values to safely fill in for '?' in the BQL query.
Returns
-------
df : pandas.DataFrame
Table of results as a pandas dataframe.
"""
if bindings is None:
bindings = ()
if logger:
logger.info("BQL [%s] %s", bql, bindings)
cursor = bdb.execute(bql, bindings)
return cursor_to_df(cursor)
@population_method(population_to_bdb=0, population_name=1)
def describe_table(bdb, table_name):
"""Returns a DataFrame containing description of `table_name`.
Examples
--------
>>> bdbcontrib.describe_table(bdb, 'employees')
tabname | colno | name
----------+-------+--------
employees | 0 | name
employees | 1 | age
employees | 2 | weight
employees | 3 | height
"""
if not bayeslite.core.bayesdb_has_table(bdb, table_name):
raise BLE(NameError('No such table {}'.format(table_name)))
sql = '''
SELECT tabname, colno, name
FROM bayesdb_column
WHERE tabname=?
ORDER BY tabname ASC, colno ASC
'''
curs = bdb.sql_execute(sql, bindings=(table_name,))
return cursor_to_df(curs)
@population_method(population_to_bdb=0, generator_name=1)
def describe_generator(bdb, generator_name):
"""Returns a DataFrame containing description of `generator_name`.
Examples
--------
>>> bdbcontrib.describe_generator(bdb, 'employees_gen')
id | name | tabname | metamodel
---+---------------+-----------+----------
3 | employees_gen | employees | crosscat
"""
if not bayeslite.core.bayesdb_has_generator_default(bdb, generator_name):
raise BLE(NameError('No such generator {}'.format(generator_name)))
sql = '''
SELECT id, name, tabname, metamodel
FROM bayesdb_generator
WHERE name = ?
'''
curs = bdb.sql_execute(sql, bindings=(generator_name,))
return cursor_to_df(curs)
@population_method(population_to_bdb=0, generator_name='generator_name')
def variable_stattypes(bdb, generator_name=None):
    """The modeled statistical types of each variable in order."""
    assert generator_name
if not bayeslite.core.bayesdb_has_generator_default(bdb, generator_name):
raise BLE(NameError('No such generator {}'.format(generator_name)))
sql = '''
SELECT c.colno AS colno, c.name AS name,
gc.stattype AS stattype
FROM bayesdb_generator AS g,
(bayesdb_column AS c LEFT OUTER JOIN
bayesdb_generator_column AS gc
USING (colno))
WHERE g.id = ? AND g.id = gc.generator_id
AND g.tabname = c.tabname
ORDER BY colno ASC;
'''
generator_id = bayeslite.core.bayesdb_get_generator_default(bdb,
generator_name)
curs = bdb.sql_execute(sql, bindings=(generator_id,))
return cursor_to_df(curs)
@population_method(population_to_bdb=0)
def list_metamodels(bdb):
df = query(bdb, "SELECT name FROM bayesdb_generator;")
return list(df['name'])
@population_method(population_to_bdb=0)
def list_tables(bdb):
df = query(bdb, """SELECT name FROM sqlite_master
WHERE type='table' AND
NAME NOT LIKE "bayesdb_%" AND
NAME NOT LIKE "sqlite_%";""")
return list(df['name'])
@population_method(population_to_bdb=0, generator_name=1)
def describe_generator_models(bdb, generator_name):
"""Returns a DataFrame containing description of the models
in `generator_name`.
Examples
--------
>>> bdbcontrib.describe_generator_models(bdb, 'employees_gen')
modelno | iterations
--------+-----------
0 | 100
"""
if not bayeslite.core.bayesdb_has_generator_default(bdb, generator_name):
raise BLE(NameError('No such generator {}'.format(generator_name)))
sql = '''
SELECT modelno, iterations FROM bayesdb_generator_model
WHERE generator_id = ?
'''
generator_id = bayeslite.core.bayesdb_get_generator_default(bdb,
generator_name)
curs = bdb.sql_execute(sql, bindings=(generator_id,))
return cursor_to_df(curs)
###############################################################################
### INTERNAL ###
###############################################################################
def get_column_info(bdb, generator_name):
generator_id = bayeslite.core.bayesdb_get_generator(bdb, generator_name)
sql = '''
SELECT c.colno, c.name, gc.stattype
FROM bayesdb_generator AS g,
bayesdb_generator_column AS gc,
bayesdb_column AS c
WHERE g.id = ?
AND gc.generator_id = g.id
AND gc.colno = c.colno
AND c.tabname = g.tabname
ORDER BY c.colno
'''
return bdb.sql_execute(sql, (generator_id,)).fetchall()
@population_method(population_to_bdb=0, generator_name=1)
def get_column_stattype(bdb, generator_name, column_name):
generator_id = bayeslite.core.bayesdb_get_generator(bdb, generator_name)
sql = '''
SELECT gc.stattype
FROM bayesdb_generator AS g,
bayesdb_generator_column AS gc,
bayesdb_column AS c
WHERE g.id = ?
AND gc.generator_id = g.id
AND gc.colno = c.colno
AND c.name = ?
AND c.tabname = g.tabname
ORDER BY c.colno
'''
cursor = bdb.sql_execute(sql, (generator_id, column_name,))
try:
row = cursor.next()
except StopIteration:
# XXX Temporary kludge for broken callers.
raise IndexError
else:
return row[0]
@population_method(population=0, generator_name='generator_name')
def analyze(self, models=100, minutes=0, iterations=0, checkpoint=0,
generator_name=None):
'''Run analysis.
models : integer
The number of models bounds the accuracy of predictive probabilities.
With ten models, then you get one decimal digit of interpretability,
with a hundred models, you get two, and so on.
minutes : integer
How long you want to let it run.
iterations : integer
How many iterations to let it run.
Returns:
A report indicating how many models have seen how many iterations,
and other info about model stability.
'''
assert generator_name is not None
if models > 0:
self.query('INITIALIZE %d MODELS IF NOT EXISTS FOR %s' %
(models, generator_name))
assert minutes == 0 or iterations == 0
else:
models = self.analysis_status(generator_name=generator_name).sum()
if minutes > 0:
if checkpoint == 0:
checkpoint = max(1, int(minutes * models / 200))
analyzer = ('ANALYZE %s FOR %d MINUTES CHECKPOINT %d ITERATION WAIT' %
(generator_name, minutes, checkpoint))
with logged_query(query_string=analyzer,
name=self.session_capture_name,
bindings=self.query('SELECT * FROM %t')):
self.query(analyzer)
elif iterations > 0:
if checkpoint == 0:
checkpoint = max(1, int(iterations / 20))
self.query(
'''ANALYZE %s FOR %d ITERATIONS CHECKPOINT %d ITERATION WAIT''' % (
generator_name, iterations, checkpoint))
else:
raise NotImplementedError('No default analysis strategy yet. '
'Please specify minutes or iterations.')
# itrs = self.per_model_analysis_status()
# models_with_fewest_iterations =
# itrs[itrs['iterations'] == itrs.min('index').head(0)[0]].index.tolist()
# TODO(gremio): run each model with as many iterations as it needs to get
# up to where it needs to get to, if that's larger?
# Nope. Vikash said there's no reason to think that's a good idea. Perhaps
# even better to have some young models mixed in with the old ones.
# I still think we should make some recommendation that scales for what
# "the right thing" is, where that's something that at least isn't known to
# suck.
return self.analysis_status(generator_name=generator_name)
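# Added usage sketch (illustrative; `pop` stands for a population object that
# exposes this function as a method via the decorator above):
#   pop.analyze(models=16, minutes=5)       # initialize 16 models, run ~5 min
#   pop.analyze(models=0, iterations=100)   # reuse existing models instead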
@population_method(population=0, generator_name='generator_name')
def per_model_analysis_status(self, generator_name=None):
"""Return the number of iterations for each model."""
assert generator_name is not None
try:
return self.query('''SELECT iterations FROM bayesdb_generator_model
WHERE generator_id = (
SELECT id FROM bayesdb_generator WHERE name = ?)''',
(generator_name,))
except ValueError:
# Because, e.g. there is no generator yet, for an empty db.
return None
@population_method(population=0, generator_name='generator_name')
def analysis_status(self, generator_name=None):
"""Return the count of models for each number of iterations run."""
assert generator_name is not None
itrs = self.per_model_analysis_status(generator_name=generator_name)
if itrs is None or len(itrs) == 0:
emt = pd.DataFrame(columns=['count of model instances'])
emt.index.name = 'iterations'
return emt
vcs = pd.DataFrame(itrs['iterations'].value_counts())
vcs.index.name = 'iterations'
vcs.columns = ['count of model instances']
self.status = vcs
return vcs
def get_data_as_list(bdb, table_name, column_list=None):
if column_list is None:
sql = '''
SELECT * FROM {};
'''.format(quote(table_name))
else:
sql = '''
SELECT {} FROM {}
'''.format(', '.join(map(quote, column_list)), table_name)
cursor = bdb.sql_execute(sql)
T = cursor_to_df(cursor).values.tolist()
return T
def get_shortnames(bdb, table_name, column_names):
return get_column_descriptive_metadata(bdb, table_name, column_names,
'shortname')
def get_descriptions(bdb, table_name, column_names):
return get_column_descriptive_metadata(bdb, table_name, column_names,
'description')
def get_column_descriptive_metadata(bdb, table_name, column_names, md_field):
short_names = []
# XXX: this is indefensibly wasteful.
bql = '''
SELECT colno, name, {} FROM bayesdb_column WHERE tabname = ?
'''.format(md_field)
records = bdb.sql_execute(bql, (table_name,)).fetchall()
# hack for case sensitivity problems
column_names = [c.upper().lower() for c in column_names]
records = [(r[0], r[1].upper().lower(), r[2]) for r in records]
for cname in column_names:
for record in records:
if record[1] == cname:
sname = record[2]
if sname is None:
sname = cname
short_names.append(sname)
break
assert len(short_names) == len(column_names)
return short_names
| apache-2.0 |
CarterBain/AlephNull | alephnull/examples/buystockasfuture.py | 1 | 5516 | #!/usr/bin/env python
#
# Copyright 2013 Carter Bain Wealth Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
from datetime import datetime
import pytz
from alephnull.algorithm import TradingAlgorithm
from alephnull.utils.factory import load_from_yahoo
from pandas.core.series import TimeSeries
SYMBOL = 'GS'
TRACK = []
DAT = [None]
DIFFS = []
SHORTFALL_STRATEGY = "sell"
class BuyStock(TradingAlgorithm):
"""This is the simplest possible algorithm that does nothing but
buy 1 share of SYMBOL on each event.
"""
def add_margin(self, data):
# Uses some strategy to get the price at some bar and calculate appropriate
# initial and maintenance margins for that bar.
# Ideally we would use SPAN margining; however, based on some naive data analysis,
# the max a stock changes in a several day period (up to 30 days) is about 42%.
# Change this when you have a better strategy!
initial_margin = data[SYMBOL]['price'] * 0.42
maintenance_margin = data[SYMBOL]['price'] * 0.32
data[SYMBOL].__dict__.update({'initial_margin': initial_margin})
data[SYMBOL].__dict__.update({'maintenance_margin': maintenance_margin})
def initialize(self, *args, **kwargs):
self._first_pass = True
self.futures_results
def handle_data(self, data): # overload handle_data() method
DAT[0] = data
self.add_margin(data)
position = self.perf_tracker.cumulative_performance.positions[SYMBOL]
maintenance_margin = data[SYMBOL]['maintenance_margin']
initial_margin = data[SYMBOL]['initial_margin']
price = data[SYMBOL].price
if self._first_pass:
initial_quantity = 50
self.order(SYMBOL, initial_quantity)
position.margin += initial_margin * initial_quantity
print(position.margin)
self._first_pass = False
self.last_price = price
return
else:
DIFFS.append((self.last_price - price) / price)
quantity_owned = position.amount
margin = position.margin
# don't ask...
timestamp = next(data[0].iteritems() if type(data) is list else data.iteritems())[1]['datetime']
TRACK.append((margin, quantity_owned, timestamp))
if maintenance_margin * quantity_owned > margin:
if SHORTFALL_STRATEGY == "sell":
TRACK.append("SELL")
# sell enough so that your margin account is back above initial margin for every contract
quantity_to_sell = int(initial_margin * quantity_owned ** 2 / margin - quantity_owned) + 1
self.order(SYMBOL, -1*quantity_to_sell)
if quantity_to_sell == 0:
TRACK.append(str(timestamp) + " had a 0-sell!")
elif SHORTFALL_STRATEGY == "buffer":
# put some more money from elsewhere into the account
pass
elif margin > 1.5*(maintenance_margin * quantity_owned):
# we've got too much in margin - we need to make our money work for us!
# buy as many contracts as we can until buying another would put us under
# 1.25 * required margin
TRACK.append("BUY")
max_funds_available = margin - 1.25*(maintenance_margin * quantity_owned)
quantity_to_buy = int(max_funds_available / initial_margin)
# we don't have to update the margin because the same amount of cash is still in the margin account,
# it is just distributed over a larger number of contracts
if quantity_to_buy == 0:
TRACK.append("0 to buy, what a shame")
else:
self.order(SYMBOL, quantity_to_buy) # order SID (=0) and amount (=1 shares)
if quantity_to_buy == 0:
TRACK.append(str(timestamp) + " had a 0-sell!")
self.last_price = price
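# Worked margin-call arithmetic (added for illustration; the numbers are made
# up): with margin = 1000, quantity_owned = 50 and initial_margin = 30 per
# contract, the sell rule above yields
#   int(30 * 50**2 / 1000 - 50) + 1 = int(75 - 50) + 1 = 26
# contracts to sell, leaving 24 contracts against the same 1000 of margin,
# i.e. roughly 41.7 per contract -- back above the per-contract requirement.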
if __name__ == '__main__':
start = datetime(2008, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2013, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=[SYMBOL], indexes={}, start=start,
end=end, adjusted=True)
simple_algo = BuyStock()
results = simple_algo.run(data)
ax1 = plt.subplot(211)
ax2 = plt.subplot(212)
TRACK_STRIPPED = [x for x in TRACK if type(x) == tuple]
futures_indexes = [timestamp for (_, _, timestamp) in TRACK_STRIPPED]
futures_quantity_data = [quantity_owned for (_, quantity_owned, _) in TRACK_STRIPPED]
futures_margin_data = [margin for (margin, _, _) in TRACK_STRIPPED]
futures_margin_series = TimeSeries(index=futures_indexes, data=futures_margin_data)
futures_margin_series.plot(ax=ax1)
futures_quantity_series = TimeSeries(index=futures_indexes, data=futures_quantity_data)
futures_quantity_series.plot(ax=ax2)
    plt.gcf().set_size_inches(18, 8)
| apache-2.0 |
edwardb/sattrackV2 | base.py | 2 | 54414 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Basic methods to help plot and interpret experimental data
This module contains the following classes:
- :class:`ArrowLine` - A matplotlib subclass to draw an arrowhead on a line
- :class:`Quantity` - Named tuple class for a constant physical quantity
and the following functions:
- :meth:`add_arrows` - Overlays arrows with annotations on top of a pre-plotted
line
- :meth:`add_hlines` - Adds horizontal lines to a set of axes with optional
labels
- :meth:`add_vlines` - Adds vertical lines to a set of axes with optional
labels
- :meth:`animate` - Encodes a series of PNG images as a MPG movie
- :meth:`color` - Plots 2D scalar data on a color axis in 2D Cartesian
coordinates
- :meth:`closeall` - Closes all open figures
- :meth:`convert` - Converts the expression of a physical quantity between units
- :meth:`expand_path` - Expands a file path by replacing '~' with the user
directory and makes the path absolute
- :meth:`flatten_dict` - Flattens a nested dictionary
- :meth:`flatten_list` - Flattens a nested list
- :meth:`figure` - Creates a figure and set its label
- :meth:`get_indices` - Returns the pair of indices that bound a target value in
a monotonically increasing vector
- :meth:`get_pow10` - Returns the exponent of 10 for which the significand
of a number is within the range [1, 10)
- :meth:`get_pow1000` - Returns the exponent of 1000 for which the
significand of a number is within the range [1, 1000)
- :meth:`load_csv` - Loads a CSV file into a dictionary
- :meth:`plot` - Plots 1D scalar data as points and/or line segments in 2D
Cartesian coordinates
- :meth:`quiver` - Plots 2D vector data as arrows in 2D Cartesian coordinates
- :meth:`save` - Saves the current figures as images in a format or list of
formats
- :meth:`saveall` - Saves all open figures as images in a format or list of
formats
- :meth:`setup_subplots` - Creates an array of subplots and return their axes
- :meth:`shift_scale_x` - Applies an offset and a factor as necessary to the x
axis
- :meth:`shift_scale_y` - Applies an offset and a factor as necessary to the y
axis
"""
__author__ = "Kevin Davies"
__email__ = "[email protected]"
__credits__ = ["Jason Grout", "Jason Heeris"]
__copyright__ = "Copyright 2012-2013, Georgia Tech Research Corporation"
__license__ = "BSD-compatible (see LICENSE.txt)"
import os
import wx
import numpy as np
import matplotlib.pyplot as plt
from collections import MutableMapping, namedtuple
from itertools import cycle
from decimal import Decimal
from math import floor
from matplotlib import rcParams
from matplotlib.lines import Line2D
from matplotlib.cbook import iterable
Quantity = namedtuple('Quantity', ['number', 'factor', 'offset', 'unit'])
"""Named tuple class for a constant physical quantity
The factor and then the offset are applied to the number to arrive at the
quantity expressed in terms of the unit.
"""
# Create a class to contain information about a unit conversion.
#Conversion = namedtuple('Conversion', ['unit', 'factor', 'offset', 'new_unit'])
def add_arrows(p, x_locs=[0], xstar_offset=0, ystar_offset=0,
lstar=0.05, label='',
orientation='tangent', color='r'):
r"""Overlay arrows with annotations on top of a pre-plotted line.
**Arguments:**
- *p*: A plot instance (:class:`matplotlib.lines.Line2D` object)
- *x_locs*: x-axis locations of the arrows
- *xstar_offset*: Normalized x-axis offset from the middle of the arrow to
the text
- *ystar_offset*: Normalized y-axis offset from the middle of the arrow to
the text
- *lstar*: Length of each arrow in normalized xy axes
- *label*: Annotation text
- *orientation*: 'tangent', 'horizontal', or 'vertical'
- *color*: Color of the arrows (from :mod:`matplotlib.colors`)
**Example:**
.. code-block:: python
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from modelicares import *
>>> # Create a plot.
>>> figure('examples/add_arrows') # doctest: +ELLIPSIS
<matplotlib.figure.Figure object at 0x...>
>>> x = np.arange(100)
>>> p = plt.plot(x, np.sin(x/4.0))
>>> # Add arrows and annotations.
>>> add_arrows(p[0], x_locs=x.take(np.arange(20,100,20)),
... label="Incr. time", xstar_offset=-0.15)
>>> save()
Saved examples/add_arrows.pdf
Saved examples/add_arrows.png
>>> plt.show()
.. only:: html
.. image:: ../examples/add_arrows.png
:scale: 70 %
:alt: example of add_arrows()
.. only:: latex
.. figure:: ../examples/add_arrows.pdf
:scale: 70 %
Example of add_arrows()
"""
from math import atan, cos, sin
# Get data from the plot lines object.
x_dat = plt.getp(p, 'xdata')
y_dat = plt.getp(p, 'ydata')
ax = p.get_axes()
Deltax = np.diff(ax.get_xlim())[0]
Deltay = np.diff(ax.get_ylim())[0]
for x_loc in x_locs:
# Get two unique indices.
i_a, i_b = get_indices(x_dat, x_loc)
if i_a == i_b:
if i_a > 0:
i_a -= 1
if i_b < len(x_dat):
i_b += 1
# Find the midpoint and x, y lengths of the arrow such that it has the
# given normalized length.
x_pts = x_dat.take([i_a, i_b])
y_pts = y_dat.take([i_a, i_b])
if orientation == 'vertical':
dx = lstar*Deltax
dy = 0
elif orientation == 'horizontal':
dx = 0
dy = lstar*Deltay
else: # tangent
theta = atan((y_pts[1] - y_pts[0])*Deltax/((x_pts[1] -
x_pts[0])*Deltay))
dx = lstar*Deltax*cos(theta)
dy = lstar*Deltay*sin(theta)
x_mid = sum(x_pts)/2
y_mid = sum(y_pts)/2
# Add the arrow and text.
line = ArrowLine([x_mid - dx, x_mid + dx], [y_mid - dy, y_mid + dy],
color=color, arrowfacecolor=color,
arrowedgecolor=color, ls='-', lw=3, arrow='>',
arrowsize=10)
ax.add_line(line)
if label:
ax.text(x_mid + xstar_offset*Deltax, y_mid + ystar_offset*Deltax,
s=label, fontsize=12)
def add_hlines(ax=None, positions=[0], labels=[], **kwargs):
r"""Add horizontal lines to a set of axes with optional labels.
**Arguments:**
- *ax*: Axes (:class:`matplotlib.axes` object)
- *positions*: Positions (along the x axis)
- *labels*: List of labels for the lines
- *\*\*kwargs*: Line properties (propagated to
:meth:`matplotlib.pyplot.axhline`)
E.g., ``color='k', linestyle='--', linewidth=0.5``
**Example:**
.. code-block:: python
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from modelicares import *
>>> # Create a plot.
>>> figure('examples/add_hlines') # doctest: +ELLIPSIS
<matplotlib.figure.Figure object at 0x...>
>>> x = np.arange(100)
>>> y = np.sin(x/4.0)
>>> plt.plot(x, y) # doctest: +ELLIPSIS
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-1.2, 1.2])
(-1.2, 1.2)
>>> # Add horizontal lines and labels.
>>> add_hlines(positions=[min(y), max(y)], labels=["min", "max"],
... color='r', ls='--')
>>> save()
Saved examples/add_hlines.pdf
Saved examples/add_hlines.png
>>> plt.show()
.. only:: html
.. image:: ../examples/add_hlines.png
:scale: 70 %
:alt: example of add_hlines()
.. only:: latex
.. figure:: ../examples/add_hlines.pdf
:scale: 70 %
Example of add_hlines()
"""
# Process the inputs.
if not ax:
ax = plt.gca()
if not iterable(positions):
        positions = (positions,)
if not iterable(labels):
labels = (labels,)
# Add and label lines.
for position in positions:
ax.axhline(y=position, **kwargs)
xpos = sum(ax.axis()[0:2])/2.0
for i, label in enumerate(labels):
ax.text(xpos, positions[i], label, backgroundcolor='w',
horizontalalignment='center', verticalalignment='center')
def add_vlines(ax=None, positions=[0], labels=[], **kwargs):
"""Add vertical lines to a set of axes with optional labels.
**Arguments:**
- *ax*: Axes (matplotlib.axes object)
- *positions*: Positions (along the x axis)
- *labels*: List of labels for the lines
- *\*\*kwargs*: Line properties (propagated to
:meth:`matplotlib.pyplot.axvline`)
E.g., ``color='k', linestyle='--', linewidth=0.5``
**Example:**
.. code-block:: python
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from modelicares import *
>>> # Create a plot.
>>> figure('examples/add_vlines') # doctest: +ELLIPSIS
<matplotlib.figure.Figure object at 0x...>
>>> x = np.arange(100)
>>> y = np.sin(x/4.0)
>>> plt.plot(x, y) # doctest: +ELLIPSIS
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-1.2, 1.2])
(-1.2, 1.2)
>>> # Add horizontal lines and labels.
>>> add_vlines(positions=[25, 50, 75], labels=["A", "B", "C"],
... color='k', ls='--')
>>> save()
Saved examples/add_vlines.pdf
Saved examples/add_vlines.png
>>> plt.show()
.. only:: html
.. image:: ../examples/add_vlines.png
:scale: 70 %
:alt: example of add_vlines()
.. only:: latex
.. figure:: ../examples/add_vlines.pdf
:scale: 70 %
Example of add_vlines()
"""
# Process the inputs.
if not ax:
ax = plt.gca()
if not iterable(positions):
positions = (positions,)
if not iterable(labels):
labels = (labels,)
# Add and label lines.
for position in positions:
ax.axvline(x=position, **kwargs)
ypos = sum(ax.axis()[2::])/2.0
for i, label in enumerate(labels):
ax.text(positions[i], ypos, label, backgroundcolor='w',
horizontalalignment='center', verticalalignment='center')
def animate(imagebase='_tmp', fname="animation", fps=10, clean=False):
"""Encode a series of PNG images as a MPG movie.
**Arguments:**
- *imagebase*: Base filename for the PNG images
The images should be located in the current directory as an
"*imagebase**xx*.png" sequence, where *xx* is a frame index.
- *fname*: Filename for the movie
".mpg" will be appended if necessary.
- *fps*: Number of frames per second
- *clean*: *True*, if the PNG images should be deleted afterward
.. Note:: This function requires mencoder_. On Linux, install it with the
following command: ``sudo apt-get install mencoder``. Currently, this
function is not supported on Windows.
.. _mencoder: http://en.wikipedia.org/wiki/MEncoder
**Example:**
.. code-block:: python
import matplotlib.pyplot as plt
from numpy.random import rand
from modelicares import *
# Create the frames.
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111)
for i in range(50): # 50 frames
ax.cla()
ax.imshow(rand(5,5), interpolation='nearest')
fname = '_tmp%02d.png' % i
print("Saving frame %i (file %s)" % (i, fname))
fig.savefig(fname) # doctest: +ELLIPSIS
# Assemble the frames into a movie.
animate(clean=True)
"""
# Note: The output of the code above is too large for inline doctest.
# TODO: Consider using the animation module from matplotlib. Should it
    # supersede this function?
# TODO: Add support for Windows.
# Based on
# http://matplotlib.sourceforge.net/faq/howto_faq.html#make-a-movie,
# accessed 11/2/10
if not fname.lower().endswith('.mpg'):
fname += '.mpg'
print('Making movie "%s". This may take a while.' % fname)
os.system("mencoder 'mf://%s*.png' -mf type=png:fps=%i -ovc lavc "
"-lavcopts vcodec=wmv2 -oac copy -o %s"%(imagebase, fps, fname))
if clean:
from glob import glob
for image in glob(imagebase + '*.png'):
os.remove(image)
def color(ax, c, *args, **kwargs):
"""Plot 2D scalar data on a color axis in 2D Cartesian coordinates.
This uses a uniform grid.
**Arguments:**
- *ax*: Axis onto which the data should be plotted
- *c*: color- or c-axis data (2D array)
- *\*args*, *\*\*kwargs*: Additional arguments for
:meth:`matplotlib.pyplot.imshow`
**Example:**
.. code-block:: python
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from modelicares import *
>>> figure('examples/color') # doctest: +ELLIPSIS
<matplotlib.figure.Figure object at 0x...>
>>> x, y = np.meshgrid(np.arange(0, 2*np.pi, 0.2),
... np.arange(0, 2*np.pi, 0.2))
>>> c = np.cos(x) + np.sin(y)
>>> ax = plt.subplot(111)
>>> color(ax, c) # doctest: +ELLIPSIS
<matplotlib.image.AxesImage object at 0x...>
>>> save()
Saved examples/color.pdf
Saved examples/color.png
>>> plt.show()
.. only:: html
.. image:: ../examples/color.png
:scale: 70 %
:alt: example of color()
.. only:: latex
.. figure:: ../examples/color.pdf
:scale: 70 %
Example of color()
"""
return ax.imshow(c, *args, **kwargs)
def closeall():
"""Close all open figures.
This is a shortcut for the following:
>>> from matplotlib._pylab_helpers import Gcf
>>> Gcf.destroy_all()
"""
from matplotlib._pylab_helpers import Gcf
Gcf.destroy_all()
#for manager in Gcf.get_all_fig_managers():
# manager.canvas.figure.close()
#plt.close("all")
def convert(quantity):
"""Convert the expression of a physical quantity between units.
**Arguments:**
- *quantity*: Instance of :class:`Quantity`
**Example:**
.. code-block:: python
>>> from modelicares import *
>>> T = 293.15 # Temperature in K
>>> T_degC = convert(Quantity(T, factor=1, offset=-273.15, unit='C'))
>>> print(str(T) + " K is " + str(T_degC) + " degC.")
293.15 K is 20.0 degC.
"""
return quantity.number*quantity.factor + quantity.offset
def expand_path(path):
r"""Expand a file path by replacing '~' with the user directory and making
the path absolute.
**Example:**
.. code-block:: python
>>> from modelicares import *
>>> expand_path('~/Documents') # doctest: +ELLIPSIS
'...Documents'
>>> # where ... is '/home/user/' on Linux or 'C:\Users\user\' on
>>> # Windows (and "user" is the user id).
"""
return os.path.abspath(os.path.expanduser(path))
def flatten_dict(d, parent_key='', separator='.'):
"""Flatten a nested dictionary.
**Arguments:**
- *d*: Dictionary (may be nested to an arbitrary depth)
- *parent_key*: Key of the parent dictionary, if any
- *separator*: String or character that joins elements of the keys or path
names
**Example:**
>>> from modelicares import *
>>> flatten_dict(dict(a=1, b=dict(c=2, d='hello')))
{'a': 1, 'b.c': 2, 'b.d': 'hello'}
"""
# From
# http://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys,
# 11/5/2012
items = []
for key, value in d.items():
new_key = parent_key + separator + key if parent_key else key
if isinstance(value, MutableMapping):
            items.extend(flatten_dict(value, new_key, separator).items())
else:
items.append((new_key, value))
return dict(items)
def flatten_list(l, ltypes=(list, tuple)):
"""Flatten a nested list.
**Arguments:**
- *l*: List (may be nested to an arbitrary depth)
If the type of *l* is not in ltypes, then it is placed in a list.
- *ltypes*: Tuple (not list) of accepted indexable types
**Example:**
>>> from modelicares import *
>>> flatten_list([1, [2, 3, [4]]])
[1, 2, 3, 4]
"""
# Based on
# http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html,
# 10/28/2011
ltype = type(l)
if ltype not in ltypes: # So that strings aren't split into characters
return [l]
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if l[i]:
l[i:i + 1] = l[i]
else:
l.pop(i)
i -= 1
break
i += 1
return ltype(l)
def figure(label='', *args, **kwargs):
"""Create a figure and set its label.
**Arguments:**
- *label*: String to apply to the figure's *label* property
- *\*args*, *\*\*kwargs*: Additional arguments for
:meth:`matplotlib.pyplot.figure`
**Example:**
.. code-block:: python
>>> fig = figure("velocity_vs_time") # doctest: +ELLIPSIS
>>> plt.getp(fig, 'label')
'velocity_vs_time'
.. Note:: The *label* property is used as the base filename in the
:meth:`saveall` method.
"""
fig = plt.figure(*args, **kwargs)
plt.setp(fig, 'label', label)
# Note: As of matplotlib 1.2, matplotlib.pyplot.figure(label=label) isn't
# supported directly.
return fig
def _gen_offset_factor(label, tick_lo, tick_up, eagerness=0.325):
"""Apply an offset and a scaling factor to a label if necessary.
**Arguments:**
    - *label*: Label to which the offset and factor notation will be added
    - *tick_lo*: Lower tick value
- *tick_up*: Upper tick value
- *eagerness*: Parameter to adjust how little of an offset is required
before the label will be recentered
- 0: Offset is never applied.
- 1: Offset is always applied if it will help.
**Returns:**
1. New label (label)
2. Offset (offset)
3. Exponent of 1000 which can be factored from the number (pow1000)
"""
# TODO: Utilize matplotlib's support for units.
def _label_offset_factor(label, offset_factor, offset_pow1000, pow1000):
"""Format an offset and factor into a LaTeX string and add to it an
existing string.
"""
DIVIDE = r'\,/\,' # LaTeX string for division
# Add the offset string.
if offset_factor:
if DIVIDE in label:
label = label.rstrip(r'$') + r'\,-\,%i$' % offset_factor
else:
label += r'$\,-\,%i$' % offset_factor
if offset_pow1000:
label = label.rstrip(r'$') + (r'\times10^{%i}$' %
(3*offset_pow1000))
# Add the scaling notation.
if pow1000:
if offset_factor:
label = (r'$($' + label.rstrip(r'$') + r')' + DIVIDE +
r'10^{%i}$' % (3*pow1000))
else:
if DIVIDE in label:
desc, unit = label.split(DIVIDE, 1)
if unit.endswith(r')$'):
label = (desc + DIVIDE + r'(10^{%i}' % (3*pow1000) +
unit.lstrip(r'('))
else:
label = (desc + DIVIDE + r'(10^{%i}' % (3*pow1000) +
unit.rstrip(r'$') + r')$')
else:
label += r'$' + DIVIDE + r'10^{%i}$' % (3*pow1000)
return label
offset = 0
offset_factor = 0
offset_pow1000 = 1
outside = min(tick_lo, 0) + max(tick_up, 0)
if outside != 0:
inside = max(tick_lo, 0) + min(tick_up, 0)
if inside/outside > 1 - eagerness:
offset = inside - np.mod(inside, 1000**get_pow1000(inside))
offset_pow1000 = get_pow1000(offset)
offset_factor = offset/1000**offset_pow1000
outside = min(tick_lo - offset, 0) + max(tick_up - offset, 0)
pow1000 = get_pow1000(outside)
label = _label_offset_factor(label, offset_factor, offset_pow1000, pow1000)
return label, offset, pow1000
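# A minimal sketch of _gen_offset_factor (hypothetical values): for ticks that
# already sit near the origin, no offset or factor is applied and the label
# comes back unchanged:
#   >>> _gen_offset_factor('Time / s', 0, 5)
#   ('Time / s', 0, 0)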
def get_indices(x, target):
"""Return the pair of indices that bound a target value in a monotonically
increasing vector.
**Arguments:**
- *x*: Vector
- *target*: Target value
**Example:**
>>> from modelicares import *
>>> get_indices([0,1,2],1.6)
(1, 2)
"""
if target <= x[0]:
return 0, 0
if target >= x[-1]:
i = len(x) - 1
return i, i
else:
i_1 = 0
i_2 = len(x) - 1
while i_1 < i_2 - 1:
i_mid = int(np.floor((i_1 + i_2)/2))
if x[i_mid] == target:
return i_mid, i_mid
elif x[i_mid] > target:
i_2 = i_mid
else:
i_1 = i_mid
return i_1, i_2
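# Additional sketch (hypothetical values): targets outside the vector clamp to
# the end points, e.g. get_indices([0, 1, 2], 5) returns (2, 2) and
# get_indices([0, 1, 2], -1) returns (0, 0).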
def get_pow10(num):
"""Return the exponent of 10 for which the significand of a number is
within the range [1, 10).
**Example:**
>>> get_pow10(50)
1
"""
    # Based on an algorithm by Jason Heeris 11/18/2009:
    # http://www.mail-archive.com/[email protected]/msg14433.html
dnum = Decimal(str(num))
if dnum == 0:
return 0
elif dnum < 0:
dnum = -dnum
return int(floor(dnum.log10()))
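# Additional sketch (hypothetical value): fractional inputs give negative
# exponents, e.g. get_pow10(0.03) returns -2 since 0.03 = 3.0 * 10**-2.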
def get_pow1000(num):
"""Return the exponent of 1000 for which the significand of a number is
within the range [1, 1000).
**Example:**
>>> get_pow1000(1e5)
1
"""
# Based on an algorithm by Jason Heeris 11/18/2009:
# http://www.mail-archive.com/[email protected]/msg14433.html
dnum = Decimal(str(num))
if dnum == 0:
return 0
elif dnum < 0:
dnum = -dnum
return int(floor(dnum.log10()/3))
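# Additional sketch (hypothetical value): get_pow1000(1e-3) returns -1 since
# 0.001 = 1.0 * 1000**-1.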
def load_csv(fname, header_row=0, first_data_row=None, types=None, **kwargs):
"""Load a CSV file into a dictionary.
The strings from the header row are used as dictionary keys.
**Arguments:**
- *fname*: Path and name of the file
- *header_row*: Row that contains the keys (uses zero-based indexing)
- *first_data_row*: First row of data (uses zero-based indexing)
If *first_data_row* is not provided, then it is assumed that the data
starts just after the header row.
- *types*: List of data types for each column
:class:`int` and :class:`float` data types will be cast into a
:class:`numpy.array`. If *types* is not provided, attempts will be
made to cast each column into :class:`int`, :class:`float`, and
:class:`str` (in that order).
- *\*\*kwargs*: Additional arguments for :meth:`csv.reader`
**Example:**
>>> from modelicares import *
>>> data = load_csv("examples/load-csv.csv", header_row=2)
>>> print("The keys are: %s" % data.keys())
The keys are: ['Price', 'Description', 'Make', 'Model', 'Year']
"""
import csv
try:
reader = csv.reader(open(fname), **kwargs)
except IOError:
print('Unable to load "%s". Check that it exists.' % fname)
return
# Read the header row and create the dictionary from it.
for i in range(header_row):
reader.next()
keys = reader.next()
data = dict.fromkeys(keys)
#print("The keys are: ")
#print(keys)
# Read the data.
if first_data_row:
for row in range(first_data_row - header_row - 1):
reader.next()
if types:
for i, (key, column, t) in enumerate(zip(keys, zip(*reader), types)):
# zip(*reader) groups the data by columns.
try:
                if t is str:
                    data[key] = column
                elif t in (int, float):
                    data[key] = np.array(map(t, column))
                else:
                    data[key] = map(t, column)
except ValueError:
                print("Could not cast column %i into %s." % (i, t))
return
else:
for key, column in zip(keys, zip(*reader)):
try:
data[key] = np.array(map(int, column))
except:
try:
data[key] = np.array(map(float, column))
except:
data[key] = map(str, column)
return data
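# A hedged sketch of load_csv with explicit types (hypothetical column order
# for the example file used in the docstring above):
#   data = load_csv("examples/load-csv.csv", header_row=2,
#                   types=[int, str, str, str, int])
# Int/float columns are returned as numpy arrays, str columns as the raw list
# of strings, and any other callable is mapped over the column.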
def plot(y, x=None, ax=None, label=None,
color=['b', 'g', 'r', 'c', 'm', 'y', 'k'],
marker=None,
dashes=[(None,None), (3,3), (1,1), (3,2,1,2)],
**kwargs):
"""Plot 1D scalar data as points and/or line segments in 2D Cartesian
coordinates.
This is similar to :meth:`matplotlib.pyplot.plot` (and actually calls that
method), but provides direct support for plotting an arbitrary number of
curves.
**Arguments:**
- *y*: y-axis data
This may contain multiple series.
- *x*: x-axis data
If *x* is not provided, the y-axis data will be plotted versus its
indices. If *x* is a single series, it will be used for all of the
y-axis series. If it is a list of series, each x-axis series will be
matched to a y-axis series.
- *ax*: Axis onto which the data should be plotted.
If *ax* is *None* (default), axes are created.
- *label*: List of labels of each series (to be used later for the legend
if applied)
- *color*: Single entry, list, or :class:`itertools.cycle` of colors that
will be used sequentially
Each entry may be a character, grayscale, or rgb value.
.. Seealso:: http://matplotlib.sourceforge.net/api/colors_api.html
- *marker*: Single entry, list, or :class:`itertools.cycle` of markers that
will be used sequentially
Use *None* for no marker. A good assortment is ["o", "v", "^", "<",
">", "s", "p", "*", "h", "H", "D", "d"]. All of the possible entries
are listed at:
http://matplotlib.sourceforge.net/api/artist_api.html#matplotlib.lines.Line2D.set_marker.
- *dashes*: Single entry, list, or :class:`itertools.cycle` of dash styles
that will be used sequentially
Each style is a tuple of on/off lengths representing dashes. Use
(0, 1) for no line and (None, None) for a solid line.
.. Seealso:: http://matplotlib.sourceforge.net/api/collections_api.html
- *\*\*kwargs*: Additional arguments for :meth:`matplotlib.pyplot.plot`
**Returns:** List of :class:`matplotlib.lines.Line2D` objects
**Example:**
.. testsetup::
>>> closeall()
.. code-block:: python
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from modelicares import *
>>> figure('examples/plot') # doctest: +ELLIPSIS
<matplotlib.figure.Figure object at 0x...>
>>> ax = plt.subplot(111)
>>> plot([range(11), range(10, -1, -1)], ax=ax) # doctest: +ELLIPSIS
[[<matplotlib.lines.Line2D object at 0x...>], [<matplotlib.lines.Line2D object at 0x...>]]
>>> save()
Saved examples/plot.pdf
Saved examples/plot.png
>>> plt.show()
.. only:: html
.. image:: ../examples/plot.png
:scale: 70 %
:alt: example of plot()
.. only:: latex
.. figure:: ../examples/plot.pdf
:scale: 70 %
Example of plot()
"""
# Create axes if necessary.
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
# Set up the color(s), marker(s), and dash style(s).
cyc = type(cycle([]))
if not isinstance(color, cyc):
if not iterable(color):
color = [color]
color = cycle(color)
if not isinstance(marker, cyc):
if not iterable(marker):
marker = [marker]
marker = cycle(marker)
if not isinstance(dashes, cyc):
if not iterable(dashes[0]):
dashes = [dashes]
dashes = cycle(dashes)
# 6/5/11: There is an ax.set_color_cycle() method that could be used, but
# there doesn't seem to be a corresponding set_line_cycle() or
# set_marker_cycle().
# 10/27/11: There may be a way to do this automatically. See:
# http://matplotlib.sourceforge.net/api/collections_api.html
# Plot the data.
if x is None:
# There is no x data; plot y vs its indices.
plots = [ax.plot(yi, label=None if label is None else label[i],
color=color.next(), marker=marker.next(),
dashes=dashes.next(), **kwargs)
for i, yi in enumerate(y)]
elif not iterable(x[0]):
# There is only one x series; use it repeatedly.
plots = [ax.plot(x, yi, label=None if label is None else label[i],
color=color.next(), marker=marker.next(),
dashes=dashes.next(), **kwargs)
for i, yi in enumerate(y)]
else:
# There is a x series for each y series.
plots = [ax.plot(xi, yi, label=None if label is None else label[i],
color=color.next(), marker=marker.next(),
dashes=dashes.next(), **kwargs)
for i, (xi, yi) in enumerate(zip(x, y))]
return plots
def quiver(ax, u, v, x=None, y=None, pad=0.05, pivot='middle', **kwargs):
"""Plot 2D vector data as arrows in 2D Cartesian coordinates.
Uses a uniform grid.
**Arguments:**
- *ax*: Axis onto which the data should be plotted
- *u*: x-direction values (2D array)
- *v*: y-direction values (2D array)
- *pad*: Amount of white space around the data (relative to the span of the
field)
- *pivot*: "tail" | "middle" | "tip" (see :meth:`matplotlib.pyplot.quiver`)
- *\*\*kwargs*: Additional arguments for :meth:`matplotlib.pyplot.quiver`
**Example:**
.. code-block:: python
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from modelicares import *
>>> figure('examples/quiver') # doctest: +ELLIPSIS
<matplotlib.figure.Figure object at 0x...>
>>> x, y = np.meshgrid(np.arange(0, 2*np.pi, 0.2),
... np.arange(0, 2*np.pi, 0.2))
>>> u = np.cos(x)
>>> v = np.sin(y)
>>> ax = plt.subplot(111)
>>> quiver(ax, u, v) # doctest: +ELLIPSIS
<matplotlib.quiver.Quiver object at 0x...>
>>> save()
Saved examples/quiver.pdf
Saved examples/quiver.png
>>> plt.show()
.. only:: html
.. image:: ../examples/quiver.png
:scale: 70 %
:alt: example of quiver()
.. only:: latex
.. figure:: ../examples/quiver.pdf
:scale: 70 %
Example of quiver()
"""
if x is None or y is None:
p = ax.quiver(u, v, pivot=pivot, **kwargs)
else:
p = ax.quiver(x, y, u, v, pivot=pivot, **kwargs)
plt.axis('tight')
l, r, b, t = plt.axis()
dx, dy = r-l, t-b
plt.axis([l-pad*dx, r+pad*dx, b-pad*dy, t+pad*dy])
return p
def save(formats=['pdf', 'png'], fbase='1'):
"""Save the current figures as images in a format or list of formats.
The directory and base filenames are taken from the *label* property of the
figures. A slash ("/") can be used as a path separator, even if the
operating system is Windows. Folders are created as needed. If the *label*
    property is empty, then a directory dialog is opened to choose a directory.
**Arguments:**
- *formats*: Format or list of formats in which the figure should be saved
- *fbase*: Default directory and base filename
This is used if the *label* attribute of the figure is empty ('').
.. Note:: In general, :meth:`save` should be called before
:meth:`matplotlib.pyplot.show` so that the figure(s) are still present
in memory.
**Example:**
.. testsetup::
>>> closeall()
.. code-block:: python
>>> import matplotlib.pyplot as plt
>>> from modelicares import *
>>> figure('temp_plot') # doctest: +ELLIPSIS
<matplotlib.figure.Figure object at 0x...>
>>> plt.plot(range(10)) # doctest: +ELLIPSIS
[<matplotlib.lines.Line2D object at 0x...>]
>>> save()
Saved temp_plot.pdf
Saved temp_plot.png
.. Note:: The :meth:`figure` method can be used to directly create a
figure with a label.
"""
from wx import DirSelector, App
# Initialize a dummy wx.App instance. Dialogs can only be called after
# this is done [http://warp.byu.edu/site/content/131, accessed 10/9/2012].
app = App()
# If formats is a singleton, turn it into a list.
if not type(formats) is list:
formats = [formats,]
# Find the figure.
fig = plt.gcf()
# Save the figures, creating folders as necessary.
(directory, fbase_fig) = os.path.split(plt.getp(fig, 'label'))
if not fbase_fig:
if not directory:
# Initialize a dummy wx.App instance. Dialogs can only be
# called after this is done
# [http://code.google.com/p/easywx/, accessed 10/7/2012].
#app = App()
directory = DirSelector("Choose a directory for the images.",
defaultPath=os.path.join(*['..']*4))
if not directory:
return
else:
fbase = fbase_fig
if directory and not os.path.isdir(directory):
os.mkdir(directory)
for format in formats:
fname = os.path.join(directory, fbase + '.' + format)
fig.savefig(fname, format=format)
print("Saved " + fname)
def saveall(formats=['pdf', 'png']):
"""Save all open figures as images in a format or list of formats.
The directory and base filenames are taken from the *label* property of the
figures. A slash ("/") can be used as a path separator, even if the
operating system is Windows. Folders are created as needed. If the *label*
    property is empty, then a directory dialog is opened to choose a directory.
In that case, the figures are saved as a sequence of numbers.
**Arguments:**
- *formats*: Format or list of formats in which the figures should be saved
.. Note:: In general, :meth:`saveall` should be called before
:meth:`matplotlib.pyplot.show` so that the figure(s) are still present
in memory.
**Example:**
.. testsetup::
>>> closeall()
.. code-block:: python
>>> import matplotlib.pyplot as plt
>>> from modelicares import *
>>> figure('temp_plot') # doctest: +ELLIPSIS
<matplotlib.figure.Figure object at 0x...>
>>> plt.plot(range(10)) # doctest: +ELLIPSIS
[<matplotlib.lines.Line2D object at 0x...>]
>>> save()
Saved temp_plot.pdf
Saved temp_plot.png
.. Note:: The :meth:`figure` method can be used to directly create a
figure with a label.
"""
from matplotlib._pylab_helpers import Gcf
from wx import DirSelector, App
# Initialize a dummy wx.App instance. Dialogs can only be called after
# this is done [http://warp.byu.edu/site/content/131, accessed 10/9/2012].
app = App()
# If formats is a singleton, turn it into a list.
if not type(formats) is list:
formats = [formats,]
# Find the figures.
figs = [manager.canvas.figure for manager in Gcf.get_all_fig_managers()]
# Save the figures, creating folders as necessary.
chosen_directory = None
i = 0
for fig in figs:
(directory, fbase) = os.path.split(plt.getp(fig, 'label'))
if not fbase:
fbase = str(i)
i += 1
if not directory:
if chosen_directory is None:
# Initialize a dummy wx.App instance. Dialogs can only be
# called after this is done
# [http://code.google.com/p/easywx/, accessed 10/7/2012].
#app = App()
chosen_directory = DirSelector(
"Choose a directory for the images.",
defaultPath=os.path.join(*['..']*4))
if not chosen_directory:
return
directory = chosen_directory
if directory and not os.path.isdir(directory):
os.mkdir(directory)
for format in formats:
fname = os.path.join(directory, fbase + '.' + format)
fig.savefig(fname, format=format)
print("Saved " + fname)
def setup_subplots(n_plots, n_rows, title="", subtitles=None,
label="multiplot",
xlabel="", xticklabels=None, xticks=None,
ylabel="", yticklabels=None, yticks=None,
ctype=None, clabel="",
margin_left=rcParams['figure.subplot.left'],
margin_right=1-rcParams['figure.subplot.right'],
margin_bottom=rcParams['figure.subplot.bottom'],
margin_top=1-rcParams['figure.subplot.top'],
margin_cbar=0.2,
wspace=0.1, hspace=0.25,
cbar_space=0.1, cbar_width=0.05):
"""Create an array of subplots and return their axes.
**Arguments:**
- *n_plots*: Number of (sub)plots
- *n_rows*: Number of rows of (sub)plots
- *title*: Title for the figure
- *subtitles*: List of subtitles (i.e., titles for each subplot) or *None*
for no subtitles
- *label*: Label for the figure
This will be used as a base filename if the figure is saved.
- *xlabel*: Label for the x-axes (only shown for the subplots on the bottom
row)
- *xticklabels*: Labels for the x-axis ticks (only shown for the subplots
on the bottom row)
If *None*, then the default is used.
- *xticks*: Positions of the x-axis ticks
If *None*, then the default is used.
- *ylabel*: Label for the y-axis (only shown for the subplots on the left
column)
- *yticklabels*: Labels for the y-axis ticks (only shown for the subplots
on the left column)
If *None*, then the default is used.
- *yticks*: Positions of the y-axis ticks
If *None*, then the default is used.
- *ctype*: Type of colorbar (*None*, 'vertical', or 'horizontal')
- *clabel*: Label for the color- or c-bar axis
- *margin_left*: Left margin
    - *margin_right*: Right margin (ignored if
      ``ctype == 'vertical'``)
    - *margin_bottom*: Bottom margin (ignored if
      ``ctype == 'horizontal'``)
    - *margin_top*: Top margin
    - *margin_cbar*: Margin reserved for the colorbar (right margin if
      ``ctype == 'vertical'`` and bottom margin if
      ``ctype == 'horizontal'``)
    - *wspace*: The amount of width reserved for blank space between subplots
    - *hspace*: The amount of height reserved for white space between subplots
    - *cbar_space*: Space between the subplot rectangles and the colorbar
      If *ctype* is *None*, then this is ignored.
    - *cbar_width*: Width of the colorbar if vertical (or height if horizontal)
      If *ctype* is *None*, then this is ignored.
**Returns:**
1. List of subplot axes
    2. Colorbar axis (returned only if *ctype* is not *None*)
3. Number of columns of subplots
**Example:**
.. testsetup::
>>> closeall()
.. code-block:: python
>>> import matplotlib.pyplot as plt
>>> from modelicares import *
>>> setup_subplots(4, 2, label='examples/setup_subplots') # doctest: +ELLIPSIS
([<matplotlib.axes._subplots.AxesSubplot object at 0x...>, <matplotlib.axes._subplots.AxesSubplot object at 0x...>, <matplotlib.axes._subplots.AxesSubplot object at 0x...>, <matplotlib.axes._subplots.AxesSubplot object at 0x...>], 2)
>>> save()
Saved examples/setup_subplots.pdf
Saved examples/setup_subplots.png
>>> plt.show()
.. only:: html
.. image:: ../examples/setup_subplots.png
:scale: 70 %
:alt: example of setup_subplots()
.. only:: latex
.. figure:: ../examples/setup_subplots.pdf
:scale: 70 %
Example of setup_subplots()
"""
from matplotlib.figure import SubplotParams
    assert ctype == 'vertical' or ctype == 'horizontal' or ctype is None, \
        "ctype must be 'vertical', 'horizontal', or None."
# Create the figure.
subplotpars = SubplotParams(left=margin_left, top=1-margin_top,
right=1-(margin_cbar if ctype == 'vertical' else margin_right),
bottom=(margin_cbar if ctype == 'horizontal' else margin_bottom),
wspace=wspace, hspace=hspace)
fig = figure(label, subplotpars=subplotpars)
fig.suptitle(t=title, fontsize=rcParams['axes.titlesize'])
# For some reason, the suptitle() function doesn't automatically follow the
# default title font size.
# Add the subplots.
# Provide at least as many panels as needed.
n_cols = int(np.ceil(float(n_plots)/n_rows))
ax = []
for i in range(n_plots):
# Create the axes.
i_col = np.mod(i, n_cols)
i_row = (i - i_col)/n_cols
a = fig.add_subplot(n_rows, n_cols, i+1)
ax.append(a)
# Scale and label the axes.
if xticks is not None:
a.set_xticks(xticks)
if yticks is not None:
a.set_yticks(yticks)
# Only show the xlabel and xticklabels for the bottom row.
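        # (The bottom-row check below is currently disabled via ``if True``, so
        # these labels are shown on every row.)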
if True:#i_row == n_rows - 1:
a.set_xlabel(xlabel)
if xticklabels is not None:
a.set_xticklabels(xticklabels)
else:
a.set_xticklabels([])
# Only show the ylabel and yticklabels for the left column.
if i_col == 0:
a.set_ylabel(ylabel)
if yticklabels is not None:
a.set_yticklabels(yticklabels)
else:
a.set_yticklabels([])
# Add the subplot title.
if subtitles:
a.set_title(subtitles[i], fontsize=rcParams['axes.labelsize'])
# Add the colorbar.
if ctype:
if ctype == 'vertical':
#fig.subplots_adjust(left=margin_left, bottom=margin_top,
# right=1-margin_cbar, top=1-margin_top,
# wspace=wspace, hspace=hspace)
cax = fig.add_axes([1 - margin_cbar + cbar_space, margin_bottom,
cbar_width, 1 - margin_bottom - margin_top])
#cax.set_ylabel(clabel)
else:
#fig.subplots_adjust(left=margin_left, bottom=margin_cbar,
# right=1-margin_left, top=1-margin_top,
# wspace=wspace, hspace=hspace)
cax = fig.add_axes([margin_left,
margin_cbar - cbar_space - cbar_width,
1 - margin_left - margin_right, cbar_width])
#cax.set_xlabel(clabel)
cax.set_ylabel(clabel)
return ax, cax, n_cols
else:
return ax, n_cols
# TODO: Remove the "_" prefix once this is tested and ready.
def _shift_scale_c(cbar, v_min, v_max, eagerness=0.325):
    """Apply an offset and a factor as necessary to the colorbar.
**Arguments:**
- *cbar*: :class:`matplotlib.colorbar.Colorbar` object
- *v_min*: Minimum of the color-axis data
- *v_max*: Maximum of the color-axis data
- *eagerness*: Parameter to adjust how little of an offset is required
before the label will be recentered
- 0: Offset is never applied.
- 1: Offset is always applied if it will help.
"""
# TODO: Provide an example.
# The concept here is based on:
# http://efreedom.com/Question/1-3677368/Matplotlib-Format-Axis-Offset-Values-Whole-Numbers-Specific-Number
# accessed 2010/11/10
label = cbar.ax.get_ylabel()
ticks = cbar.ax.get_yticks()
    label, offset, pow1000 = _gen_offset_factor(label, v_min, v_max, eagerness)
cbar.set_ticklabels(["%.1f" % x for x in (ticks - offset)/1000**pow1000])
cbar.set_label(label)
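# A hedged usage sketch for _shift_scale_c (hypothetical names): given a
# colorbar whose data span a narrow range around a large value,
#   cbar = fig.colorbar(image)
#   _shift_scale_c(cbar, 55478, 55486)
# the tick labels are rewritten relative to the computed offset and the
# offset/factor notation is appended to the colorbar's label.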
def shift_scale_x(ax, eagerness=0.325):
"""Apply an offset and a factor as necessary to the x axis.
**Arguments:**
- *ax*: matplotlib.axes object
- *eagerness*: Parameter to adjust how little of an offset is required
before the label will be recentered
- 0: Offset is never applied.
- 1: Offset is always applied if it will help.
**Example:**
.. code-block:: python
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from texunit import label_number
>>> from modelicares import *
>>> # Generate some random data.
>>> x = np.linspace(55478, 55486, 100) # Small range and large offset
>>> xlabel = label_number('Time', 's')
>>> y = np.cumsum(np.random.random(100) - 0.5)
>>> # Plot the data.
>>> ax = setup_subplots(2, 2, label='examples/shift_scale_x')[0]
>>> for a in ax:
... a.plot(x, y)
... a.set_xlabel(xlabel) # doctest: +ELLIPSIS
[<matplotlib.lines.Line2D object at 0x...>]
<matplotlib.text.Text object at 0x...>
[<matplotlib.lines.Line2D object at 0x...>]
<matplotlib.text.Text object at 0x...>
>>> # Shift and scale the axes.
>>> ax[0].set_title('Original plot') # doctest: +ELLIPSIS
<matplotlib.text.Text object at 0x...>
>>> ax[1].set_title('After applying offset and factor') # doctest: +ELLIPSIS
<matplotlib.text.Text object at 0x...>
>>> shift_scale_x(ax[1])
>>> save()
Saved examples/shift_scale_x.pdf
Saved examples/shift_scale_x.png
>>> plt.show()
.. only:: html
.. image:: ../examples/shift_scale_x.png
:scale: 70 %
:alt: example of shift_scale_x()
.. only:: latex
.. figure:: ../examples/shift_scale_x.pdf
:scale: 70 %
Example of shift_scale_x()
"""
# The concept here is based on:
# http://efreedom.com/Question/1-3677368/Matplotlib-Format-Axis-Offset-Values-Whole-Numbers-Specific-Number,
# accessed 2010/11/10
label = ax.get_xlabel()
ticks = ax.get_xticks()
label, offset, pow1000 = _gen_offset_factor(label, ticks[0], ticks[-1],
eagerness)
ax.set_xticklabels(["%.1f" % x for x in (ticks - offset)/1000**pow1000])
ax.set_xlabel(label)
def shift_scale_y(ax, eagerness=0.325):
"""Apply an offset and a factor as necessary to the y axis.
**Arguments:**
- *ax*: matplotlib.axes object
- *eagerness*: Parameter to adjust how little of an offset is required
before the label will be recentered
- 0: Offset is never applied.
- 1: Offset is always applied if it will help.
**Example:**
.. code-block:: python
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from texunit import label_number
>>> from modelicares import *
>>> # Generate some random data.
>>> x = range(100)
>>> y = np.cumsum(np.random.random(100) - 0.5)
>>> y -= y.min()
>>> y *= 1e-3
>>> y += 1e3 # Small magnitude and large offset
>>> ylabel = label_number('Velocity', 'mm/s')
>>> # Plot the data.
>>> ax = setup_subplots(2, 2, label='examples/shift_scale_y')[0]
>>> for a in ax:
... a.plot(x, y)
... a.set_ylabel(ylabel) # doctest: +ELLIPSIS
[<matplotlib.lines.Line2D object at 0x...>]
<matplotlib.text.Text object at 0x...>
[<matplotlib.lines.Line2D object at 0x...>]
<matplotlib.text.Text object at 0x...>
>>> # Shift and scale the axes.
>>> ax[0].set_title('Original plot') # doctest: +ELLIPSIS
<matplotlib.text.Text object at 0x...>
>>> ax[1].set_title('After applying offset and factor') # doctest: +ELLIPSIS
<matplotlib.text.Text object at 0x...>
>>> shift_scale_y(ax[1])
>>> save()
Saved examples/shift_scale_y.pdf
Saved examples/shift_scale_y.png
>>> plt.show()
.. only:: html
.. image:: ../examples/shift_scale_y.png
:scale: 70 %
:alt: example of shift_scale_y()
.. only:: latex
.. figure:: ../examples/shift_scale_y.pdf
:scale: 70 %
Example of shift_scale_y()
"""
# The concept here is based on:
# http://efreedom.com/Question/1-3677368/Matplotlib-Format-Axis-Offset-Values-Whole-Numbers-Specific-Number,
# accessed 2010/11/10
label = ax.get_ylabel()
ticks = ax.get_yticks()
label, offset, pow1000 = _gen_offset_factor(label, ticks[0], ticks[-1],
eagerness)
ax.set_yticklabels(["%.1f" % x for x in (ticks - offset)/1000**pow1000])
ax.set_ylabel(label)
# From http://old.nabble.com/Arrows-using-Line2D-and-shortening-lines-td19104579.html,
# accessed 2010/11/2012
class ArrowLine(Line2D):
"""A matplotlib subclass to draw an arrowhead on a line
"""
__author__ = "Jason Grout"
__copyright__ = "Copyright (C) 2008"
__email__ = "jason-sage@..."
__license__ = "Modified BSD License"
from matplotlib.path import Path
arrows = {'>' : '_draw_triangle_arrow'}
_arrow_path = Path([[0.0, 0.0], [-1.0, 1.0], [-1.0, -1.0], [0.0, 0.0]],
[Path.MOVETO, Path.LINETO,Path.LINETO, Path.CLOSEPOLY])
def __init__(self, *args, **kwargs):
"""Initialize the line and arrow.
**Arguments:**
- *arrow* (='-'): Type of arrow ('<' | '-' | '>')
- *arrowsize* (=2*4): Size of arrow
- *arrowedgecolor* (='b'): Color of arrow edge
- *arrowfacecolor* (='b'): Color of arrow face
- *arrowedgewidth* (=4): Width of arrow edge
- *arrowheadwidth* (=\ *arrowsize*): Width of arrow head
- *arrowheadlength* (=\ *arrowsize*): Length of arrow head
- *\*args*, *\*\*kwargs*: Additional arguments for
:class:`matplotlib.lines.Line2D`
**Example:**
.. code-block:: python
>>> import matplotlib.pyplot as plt
>>> from modelicares import *
>>> fig = figure('examples/ArrowLine') # doctest: +ELLIPSIS
>>> ax = fig.add_subplot(111, autoscale_on=False)
>>> t = [-1,2]
>>> s = [0,-1]
>>> line = ArrowLine(t, s, color='b', ls='-', lw=2, arrow='>',
... arrowsize=20)
>>> ax.add_line(line) # doctest: +ELLIPSIS
<modelicares.base.ArrowLine object at 0x...>
>>> ax.set_xlim(-3, 3)
(-3, 3)
>>> ax.set_ylim(-3, 3)
(-3, 3)
>>> save()
Saved examples/ArrowLine.pdf
Saved examples/ArrowLine.png
>>> plt.show()
.. only:: html
.. image:: ../examples/ArrowLine.png
:scale: 70 %
:alt: example of ArrowLine
.. only:: latex
.. figure:: ../examples/ArrowLine.pdf
:scale: 70 %
Example of ArrowLine
"""
self._arrow = kwargs.pop('arrow', '-')
self._arrowsize = kwargs.pop('arrowsize', 2*4)
self._arrowedgecolor = kwargs.pop('arrowedgecolor', 'b')
self._arrowfacecolor = kwargs.pop('arrowfacecolor', 'b')
self._arrowedgewidth = kwargs.pop('arrowedgewidth', 4)
self._arrowheadwidth = kwargs.pop('arrowheadwidth', self._arrowsize)
self._arrowheadlength = kwargs.pop('arrowheadlength', self._arrowsize)
Line2D.__init__(self, *args, **kwargs)
def draw(self, renderer):
"""Draw the line and arrowhead using the passed renderer.
"""
#if self._invalid:
# self.recache()
renderer.open_group('arrowline2d')
if not self._visible:
return
Line2D.draw(self, renderer)
if self._arrow is not None:
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self._arrowedgecolor)
gc.set_linewidth(self._arrowedgewidth)
gc.set_alpha(self._alpha)
funcname = self.arrows.get(self._arrow, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path\
.get_transformed_points_and_affine()
arrow_func = getattr(self, funcname)
arrow_func(renderer, gc, tpath, affine.frozen())
renderer.close_group('arrowline2d')
def _draw_triangle_arrow(self, renderer, gc, path, path_trans):
"""Draw a triangular arrow.
"""
from math import atan2
from matplotlib.transforms import Affine2D
segment = [i[0] for i in path.iter_segments()][-2:]
startx, starty = path_trans.transform_point(segment[0])
endx, endy = path_trans.transform_point(segment[1])
angle = atan2(endy-starty, endx-startx)
halfwidth = 0.5*renderer.points_to_pixels(self._arrowheadwidth)
length = renderer.points_to_pixels(self._arrowheadlength)
transform = Affine2D().scale(length, halfwidth).rotate(angle)\
.translate(endx, endy)
rgbFace = self._get_rgb_arrowface()
renderer.draw_path(gc, self._arrow_path, transform, rgbFace)
def _get_rgb_arrowface(self):
"""Get the color of the arrow face.
"""
from matplotlib.cbook import is_string_like
from matplotlib.colors import colorConverter
facecolor = self._arrowfacecolor
if is_string_like(facecolor) and facecolor.lower() == 'none':
rgbFace = None
else:
rgbFace = colorConverter.to_rgb(facecolor)
return rgbFace
if __name__ == "__main__":
"""Test the contents of this file."""
import doctest
doctest.testmod()
| gpl-3.0 |
Adai0808/scikit-learn | sklearn/metrics/tests/test_classification.py | 83 | 49782 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Make some classification predictions on a toy dataset using an SVC.
    If binary is True, restrict to a binary classification problem instead of a
    multiclass classification problem.
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
"""Test handling of explicit additional (not in input) labels to PRF
"""
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
"""Test a subset of labels may be requested for PRF"""
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
    # Duplicate values with precision-recall require different
    # processing than when computing the AUC of a ROC, because the
    # precision-recall curve is a decreasing curve
    # The following situation corresponds to a perfect
    # test statistic; the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    # Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
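    # illustrative: the margins y * pred_decision are [8.5, 0.5, 1.5, 0.3],
    # so the per-sample losses max(0, 1 - margin) are [0, 0.5, 0, 0.7]
    # and their mean is 1.2 / 4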
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, 0.24],
[-2.36, -0.79, -0.27, 0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
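    # with "no" < "yes", column 0 holds p("no") and column 1 holds p("yes"),
    # so the probabilities assigned to the true classes are
    # [0.5, 0.1, 0.01, 0.1, 0.25, 0.999] and
    # loss = -mean(log(p_true)) ~= 1.8817971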
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
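    # the probabilities assigned to the true classes are 0.7, 0.6 and 0.3,
    # so loss = -(log(0.7) + log(0.6) + log(0.3)) / 3 ~= 0.6904911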
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
    # raise an error if the number of classes does not match.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
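    # the Brier score is the mean squared difference between the predicted
    # probability and the actual outcome, i.e. np.mean((y_true - y_pred) ** 2)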
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
| bsd-3-clause |
saquiba2/numpy2 | numpy/core/function_base.py | 25 | 7636 | from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace', 'may_share_memory']
from . import numeric as _nx
from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
start = start * 1.
stop = stop * 1.
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
y = _nx.arange(0, num, dtype=dt)
if num > 1:
delta = stop - start
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
y *= delta
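            # i.e. compute (y / div) * delta instead of y * (delta / div):
            # when delta / div underflows to zero, dividing y by div first
            # and then scaling by delta keeps the spacing nonzero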
else:
y *= step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
y += start
if endpoint and num > 1:
y[-1] = stop
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
def may_share_memory(a, b, max_work=None):
"""Determine if two arrays can share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem. See
`shares_memory` for details. Default for ``may_share_memory``
is to do a bounds check.
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
"""
if max_work is None:
max_work = MAY_SHARE_BOUNDS
try:
return shares_memory(a, b, max_work=max_work)
except (TooHardError, OverflowError):
# Unable to determine, assume yes
return True
| bsd-3-clause |
rspavel/spack | var/spack/repos/builtin/packages/py-xarray/package.py | 5 | 1757 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyXarray(PythonPackage):
"""N-D labeled arrays and datasets in Python"""
homepage = "https://github.com/pydata/xarray"
url = "https://pypi.io/packages/source/x/xarray/xarray-0.9.1.tar.gz"
version('0.14.0', sha256='a8b93e1b0af27fa7de199a2d36933f1f5acc9854783646b0f1b37fed9b4da091')
version('0.13.0', sha256='80e5746ffdebb96b997dba0430ff02d98028ef3828e6db6106cbbd6d62e32825')
version('0.12.0', sha256='856fd062c55208a248ac3784cac8d3524b355585387043efc92a4188eede57f3')
version('0.11.0', sha256='636964baccfca0e5d69220ac4ecb948d561addc76f47704064dcbe399e03a818')
version('0.9.1', sha256='89772ed0e23f0e71c3fb8323746374999ecbe79c113e3fadc7ae6374e6dc0525')
depends_on('[email protected]:2.8,3.5:', when='@0.11:', type=('build', 'run'))
depends_on('[email protected]:', when='@0.12', type=('build', 'run'))
depends_on('[email protected]:', when='@0.13', type=('build', 'run'))
depends_on('[email protected]:', when='@0.14:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', when='@0.9.1', type=('build', 'run'))
depends_on('[email protected]:', when='@0.11:0.13', type=('build', 'run'))
depends_on('[email protected]:', when='@0.14:', type=('build', 'run'))
depends_on('[email protected]:', when='@0.9.1', type=('build', 'run'))
depends_on('[email protected]:', when='@0.11:0.13', type=('build', 'run'))
depends_on('[email protected]:', when='@0.14:', type=('build', 'run'))
| lgpl-2.1 |
robintw/PyMicrotops | setup.py | 1 | 3375 | #!/usr/bin/env python
# Copyright (c) 2015, Robin Wilson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Robin Wilson nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from setuptools import setup
PROJECT_ROOT = os.path.dirname(__file__)
def read_file(filepath, root=PROJECT_ROOT):
"""
Return the contents of the specified `filepath`.
* `root` is the base path and it defaults to the `PROJECT_ROOT` directory.
* `filepath` should be a relative path, starting from `root`.
"""
with open(os.path.join(root, filepath)) as fd:
text = fd.read()
return text
reqs = ['numpy', "python-dateutil", "pyserial", "matplotlib", "pandas"]
setup(
name = "PyMicrotops",
packages = ['PyMicrotops'],
install_requires = reqs,
version = "1.1.0",
author = "Robin Wilson",
author_email = "[email protected]",
description = ("A module to read and process data from the Microtops sun photometer, including reading via serial link and interpolating to AOT at any wavelength"),
license = "BSC",
url = "https://github.com/robintw/PyMicrotops",
entry_points = {'console_scripts': [
'read_microtops = PyMicrotops.read_from_serial:main'
]},
long_description = read_file('PyPI_README.rst'),
classifiers =[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Atmospheric Science",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2"
],
)
| bsd-3-clause |
airbnb/superset | superset/result_set.py | 2 | 8473 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Superset wrapper around pyarrow.Table.
"""
import datetime
import json
import logging
from typing import Any, Dict, List, Optional, Tuple, Type
import numpy as np
import pandas as pd
import pyarrow as pa
from superset import db_engine_specs
from superset.typing import DbapiDescription, DbapiResult
from superset.utils import core as utils
logger = logging.getLogger(__name__)
def dedup(l: List[str], suffix: str = "__", case_sensitive: bool = True) -> List[str]:
"""De-duplicates a list of string by suffixing a counter
Always returns the same number of entries as provided, and always returns
unique values. Case sensitive comparison by default.
>>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'])))
foo,bar,bar__1,bar__2,Bar
>>> print(
','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'], case_sensitive=False))
)
foo,bar,bar__1,bar__2,Bar__3
"""
new_l: List[str] = []
seen: Dict[str, int] = {}
for item in l:
s_fixed_case = item if case_sensitive else item.lower()
if s_fixed_case in seen:
seen[s_fixed_case] += 1
item += suffix + str(seen[s_fixed_case])
else:
seen[s_fixed_case] = 0
new_l.append(item)
return new_l
def stringify(obj: Any) -> str:
return json.dumps(obj, default=utils.json_iso_dttm_ser)
def stringify_values(array: np.ndarray) -> np.ndarray:
vstringify = np.vectorize(stringify)
return vstringify(array)
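# e.g. stringify_values(np.array([{"a": 1}, None], dtype=object)) yields the
# strings '{"a": 1}' and 'null', which pyarrow can store as a string column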
def destringify(obj: str) -> Any:
return json.loads(obj)
class SupersetResultSet:
def __init__( # pylint: disable=too-many-locals,too-many-branches
self,
data: DbapiResult,
cursor_description: DbapiDescription,
db_engine_spec: Type[db_engine_specs.BaseEngineSpec],
):
self.db_engine_spec = db_engine_spec
data = data or []
column_names: List[str] = []
pa_data: List[pa.Array] = []
deduped_cursor_desc: List[Tuple[Any, ...]] = []
numpy_dtype: List[Tuple[str, ...]] = []
stringified_arr: np.ndarray
if cursor_description:
# get deduped list of column names
column_names = dedup([col[0] for col in cursor_description])
# fix cursor descriptor with the deduped names
deduped_cursor_desc = [
tuple([column_name, *list(description)[1:]])
for column_name, description in zip(column_names, cursor_description)
]
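            # e.g. a cursor description with duplicated names such as
            # [("id", ...), ("id", ...)] yields column_names ["id", "id__1"]
            # and a matching, renamed cursor description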
# generate numpy structured array dtype
numpy_dtype = [(column_name, "object") for column_name in column_names]
# only do expensive recasting if datatype is not standard list of tuples
if data and (not isinstance(data, list) or not isinstance(data[0], tuple)):
data = [tuple(row) for row in data]
array = np.array(data, dtype=numpy_dtype)
if array.size > 0:
for column in column_names:
try:
pa_data.append(pa.array(array[column].tolist()))
except (
pa.lib.ArrowInvalid,
pa.lib.ArrowTypeError,
pa.lib.ArrowNotImplementedError,
                            TypeError,  # this is super hacky,
# https://issues.apache.org/jira/browse/ARROW-7855
):
# attempt serialization of values as strings
stringified_arr = stringify_values(array[column])
pa_data.append(pa.array(stringified_arr.tolist()))
if pa_data: # pylint: disable=too-many-nested-blocks
for i, column in enumerate(column_names):
if pa.types.is_nested(pa_data[i].type):
# TODO: revisit nested column serialization once nested types
# are added as a natively supported column type in Superset
# (superset.utils.core.DbColumnType).
stringified_arr = stringify_values(array[column])
pa_data[i] = pa.array(stringified_arr.tolist())
elif pa.types.is_temporal(pa_data[i].type):
# workaround for bug converting
# `psycopg2.tz.FixedOffsetTimezone` tzinfo values.
# related: https://issues.apache.org/jira/browse/ARROW-5248
sample = self.first_nonempty(array[column])
if sample and isinstance(sample, datetime.datetime):
try:
if sample.tzinfo:
tz = sample.tzinfo
series = pd.Series(
array[column], dtype="datetime64[ns]"
)
series = pd.to_datetime(series).dt.tz_localize(tz)
pa_data[i] = pa.Array.from_pandas(
series, type=pa.timestamp("ns", tz=tz)
)
except Exception as ex: # pylint: disable=broad-except
logger.exception(ex)
self.table = pa.Table.from_arrays(pa_data, names=column_names)
self._type_dict: Dict[str, Any] = {}
try:
# The driver may not be passing a cursor.description
self._type_dict = {
col: db_engine_spec.get_datatype(deduped_cursor_desc[i][1])
for i, col in enumerate(column_names)
if deduped_cursor_desc
}
except Exception as ex: # pylint: disable=broad-except
logger.exception(ex)
@staticmethod
def convert_pa_dtype(pa_dtype: pa.DataType) -> Optional[str]:
if pa.types.is_boolean(pa_dtype):
return "BOOL"
if pa.types.is_integer(pa_dtype):
return "INT"
if pa.types.is_floating(pa_dtype):
return "FLOAT"
if pa.types.is_string(pa_dtype):
return "STRING"
if pa.types.is_temporal(pa_dtype):
return "DATETIME"
return None
@staticmethod
def convert_table_to_df(table: pa.Table) -> pd.DataFrame:
return table.to_pandas(integer_object_nulls=True)
@staticmethod
def first_nonempty(items: List[Any]) -> Any:
return next((i for i in items if i), None)
def is_temporal(self, db_type_str: Optional[str]) -> bool:
return self.db_engine_spec.is_db_column_type_match(
db_type_str, utils.DbColumnType.TEMPORAL
)
def data_type(self, col_name: str, pa_dtype: pa.DataType) -> Optional[str]:
"""Given a pyarrow data type, Returns a generic database type"""
set_type = self._type_dict.get(col_name)
if set_type:
return set_type
mapped_type = self.convert_pa_dtype(pa_dtype)
if mapped_type:
return mapped_type
return None
def to_pandas_df(self) -> pd.DataFrame:
return self.convert_table_to_df(self.table)
@property
def pa_table(self) -> pa.Table:
return self.table
@property
def size(self) -> int:
return self.table.num_rows
@property
def columns(self) -> List[Dict[str, Any]]:
if not self.table.column_names:
return []
columns = []
for col in self.table.schema:
db_type_str = self.data_type(col.name, col.type)
column = {
"name": col.name,
"type": db_type_str,
"is_date": self.is_temporal(db_type_str),
}
columns.append(column)
return columns
| apache-2.0 |
WilsonWangTHU/neural_graph_evolution | env/visualize_species.py | 1 | 8276 | # -----------------------------------------------------------------------------
# @author:
# Tingwu Wang
# @brief:
# generate the videos into the same directory
# -----------------------------------------------------------------------------
import numpy as np
import argparse
import glob
import cv2
import os
import init_path
from tqdm import tqdm
# import matplotlib.pyplot as plt
def get_candidates(args):
# base_path for the base, candidate_list for the topology data
# case one: plot all species, or one of the species
# XX/species_topology
# case two: plot the top ranked_species
# XX/species_data
# case three: plot the top ranked_species's video
# XX/species_video
if args.file_name.endswith('.npy'):
candidate_list = [args.file_name]
else:
candidate_list = glob.glob(os.path.join(args.file_name, '*.npy'))
candidate_list = [i_candidate for i_candidate in candidate_list
if 'rank_info' not in i_candidate]
if 'species_topology' in args.file_name:
species_topology_list = candidate_list
elif 'species_data' in args.file_name:
species_topology_list = candidate_list
else:
assert 'species_video' in args.file_name
species_topology_list = [
os.path.join(
os.path.dirname(i_candidate).replace('species_video',
'species_topology'),
os.path.basename(i_candidate).split('_')[1] + '.npy'
)
for i_candidate in candidate_list
]
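        # e.g. a ranked video under .../species_video/ is paired with the
        # topology file under .../species_topology/ named after the second
        # '_'-separated token of the video's basename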
task = os.path.abspath(candidate_list[0]).split(
init_path.get_abs_base_dir()
)[1].split('/')[2].split('_')[0]
task = task.replace('/', '')
return candidate_list, species_topology_list, task
if __name__ == '__main__':
'''
@brief:
Either plot the directory, or simply one npy file
'''
parser = argparse.ArgumentParser(description="Plot results from a dir")
parser.add_argument(
"-i", "--file_name", type=str, required=True,
help="The directory of the summary file"
)
parser.add_argument(
"-s", "--size", type=int, required=False, default=480
)
# temporary fix
parser.add_argument(
'--fish_target_angle', type=float, required=False, default=np.pi / 6
)
parser.add_argument(
'--fish_circle_radius', type=float, required=False, default=0.5
)
parser.add_argument(
'--walker_ctrl_coeff', type=float, required=False, default=0.001
)
parser.add_argument('--fish_target_speed', type=float, default=0.002,
help='the speed of the target')
parser.add_argument(
"-v", "--video", type=int, required=True, default=0,
help='whether to generate the videos or just images'
)
parser.add_argument(
"-l", "--video_length", type=int, required=False, default=100,
)
args = parser.parse_args()
candidate_list, species_topology_list, task = get_candidates(args)
if 'finetune' in candidate_list[0]:
args.optimize_creature = True
else:
args.optimize_creature = False
args.task = task
for i_id, candidate in enumerate(tqdm(candidate_list)):
# process each environment
data = np.load(candidate)
if data.dtype != np.float:
data = data.item()
# TODO:
try:
if 'species_video' in args.file_name:
topology_data = np.load(species_topology_list[i_id]).item()
                if not candidate.endswith('5.npy'): continue
elif 'species_data' in args.file_name:
topology_data = np.load(candidate_list[i_id]).item()
elif 'species_topology' in args.file_name:
topology_data = np.load(candidate).item()
else:
assert 0, 'The input filename is not valid.'
except Exception as e:
print(e)
continue
if 'fish' in task:
import fish_env_wrapper
# @HZ: a very hacky way to change the upper camera view
# import pdb; pdb.set_trace()
topology_data['xml_str'] = topology_data['xml_str'].decode('utf-8').replace(
'<camera mode=\"trackcom\" name=\"tracking_top\" pos=\"0 0 1\" xyaxes=\"1 0 0 0 1 0\"',
'<camera mode=\"trackcom\" name=\"tracking_top\" pos=\"-.1 .2 .2\" xyaxes=\"-2-1 0 -0 -.5 1\"')
env = fish_env_wrapper.dm_evofish3d_wrapper(
args=args, rand_seed=1, monitor=0,
adj_matrix=topology_data['adj_matrix'],
xml_str=topology_data['xml_str']
)
elif 'walker' in task or 'cheetah' in task or 'hopper' in task:
from env import walker_env_wrapper
env = walker_env_wrapper.dm_evowalker_wrapper(
args=args, rand_seed=1, monitor=0,
adj_matrix=topology_data['adj_matrix'],
xml_str=topology_data['xml_str']
)
else:
assert 0
action_size = env.env.action_spec().shape[0]
if args.video:
# check generation
gen_num = int(candidate.split('/')[-1].split('_')[0])
if gen_num % 10 == 0 or gen_num <= 5:
pass
else:
continue
# save the videos
if os.path.exists(candidate.replace('.npy', '.mp4')):
continue
video = cv2.VideoWriter(
candidate.replace('.npy', '.mp4'),
cv2.VideoWriter_fourcc(*'mp4v'),
40, (args.size * 2, args.size)
)
env.reset()
if 'species_video' in candidate:
# recover the videos
for i_frame in range(min(len(data), args.video_length)):
with env.env.physics.reset_context():
if 'fish' in args.task:
# set the target position
env.env.physics.named.model.geom_pos['target', 'x'] = \
data[i_frame][-2]
env.env.physics.named.model.geom_pos['target', 'y'] = \
data[i_frame][-1]
env.env.physics.data.qpos[:] = data[i_frame][:-2]
else:
env.env.physics.data.qpos[:] = data[i_frame]
image = np.hstack(
[env.env.physics.render(args.size, args.size, camera_id=0),
env.env.physics.render(args.size, args.size, camera_id=1)]
)
# rgb to bgr
image = image[:, :, [2, 1, 0]]
video.write(image)
else:
# generate videos using random actions
for i_frame in range(args.video_length):
env.step((np.random.rand(action_size) - 0.5) * 2)
image = np.hstack(
[env.env.physics.render(args.size, args.size, camera_id=0),
env.env.physics.render(args.size, args.size, camera_id=1)]
)
# rgb to bgr
image = image[:, :, [2, 1, 0]]
video.write(image)
video.release()
else:
# save the screenshot of the species
if os.path.exists(candidate.replace('.npy', '.png')):
continue
env.reset()
for _ in range(30):
env.step((np.random.rand(action_size) - 0.5) * 2)
image = np.hstack(
[env.env.physics.render(args.size, args.size, camera_id=0),
env.env.physics.render(args.size, args.size, camera_id=1)]
)
# rgb to bgr
image = image[:, :, [2, 1, 0]]
cv2.imwrite(candidate.replace('.npy', '.png'), image)
| mit |
vorasagar7/sp17-i524 | project/S17-IR-2002/code/cc_analyze_data.py | 19 | 2962 | import matplotlib as mpl
mpl.use('Agg')
import pandas as pd
import numpy as np
import datetime
import os
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
#READ 3 MILLION ROWS DATASET
d=pd.read_csv("/home/cc/h1b_m3Rows.csv")
#READ TWO MILLION ROWS DATASET FOR BENCHMARKING
#d=pd.read_csv("/home/cc/h1b_m2Rows.csv")
#READ ONE MILLION ROWS DATASET FOR BENCHMARKING
#d=pd.read_csv("/home/cc/h1b_m1Rows.csv")
#READ DATASET SUBSET BY DATA SCIENCE RELATED JOB_TITLE ONLY FOR BENCHMARKING
#d=pd.read_csv("/home/cc/h1b_DataScienceOnly.csv")
#CASE_STATUS DISTRIBUTION
print('--------------------------------------------------------------------')
print('****************** CASE STATUS DISTRIBUTION ************************')
print('--------------------------------------------------------------------')
print(d['CASE_STATUS'].value_counts())
print('--------------------------------------------------------------------')
print(' ')
#PETITIONS PER STATE PER YEAR
d['STATE']=d['WORKSITE'].str.split(', ').str[1]
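#assuming WORKSITE values look like 'CITY, STATE' (e.g. 'SAN JOSE, CALIFORNIA'),
#the split above keeps only the state name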
state_data=d.groupby(['STATE', 'YEAR']).size()
state_year_data=state_data.unstack()
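#groupby(['STATE', 'YEAR']).size() counts petitions per (state, year) pair and
#unstack() pivots YEAR into columns so each row is one state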
state_year_data['TOTAL'] = state_year_data.sum(axis=1)
print('--------------------------------------------------------------------------------')
print('************************** PETITION PER STATE PER YEAR *************************')
print('--------------------------------------------------------------------------------')
print(state_year_data)
print('--------------------------------------------------------------------------------')
print(' ')
#LOCATIONS HIRING DATA SCIENTISTS THE MOST
location=d[d['JOB_TITLE']=='DATA SCIENTIST']
print('--------------------------------------------------------------------')
print('**************** TOP 25 LOCATION HIRING DATA SCIENTIST *************')
print('--------------------------------------------------------------------')
print(location['WORKSITE'].value_counts().head(25))
print('--------------------------------------------------------------------')
print(' ');
#TOP EMPLOYERS HIRING DATA SCIENTISTS
employer=d[d['JOB_TITLE']=='DATA SCIENTIST']
print('--------------------------------------------------------------------')
print('************* TOP 25 COMPANY HIRING DATA SCIENTIST *****************')
print('--------------------------------------------------------------------')
print(employer['EMPLOYER_NAME'].value_counts().head(25))
print('--------------------------------------------------------------------')
print(' ')
#DATA SCIENTIST WAGES DIFFER ACROSS STATES
d.loc[:,'WORKSITE'] = d.loc[:,'WORKSITE'].apply(lambda x:x.split(',')[-1][1:])
job_title_group = d.groupby('JOB_TITLE')
for key in job_title_group.groups.keys():
if key == 'DATA SCIENTIST':
d = job_title_group.get_group(key)[['WORKSITE','PREVAILING_WAGE']]
d.boxplot(column = 'PREVAILING_WAGE', by = 'WORKSITE' , fontsize = 6)
plt.xticks(rotation = 45)
plt.savefig("/home/cc/images/wage.png")
| apache-2.0 |
CopyChat/Plotting | Python/TestCode/contour_demo.py | 14 | 3478 | #!/usr/bin/env python
"""
Illustrate simple contour plotting, contours on an image with
a colorbar for the contours, and labelled contours.
See also contour_image.py.
"""
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
# Create a simple contour plot with labels using default colors. The
# inline argument to clabel will control whether the labels are draw
# over the line segments of the contour, removing the lines beneath
# the label
plt.figure()
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('Simplest default with labels')
# contour labels can be placed manually by providing a list of positions
# (in data coordinates). See ginput_manual_clabel.py for interactive
# placement.
plt.figure()
CS = plt.contour(X, Y, Z)
manual_locations = [(-1, -1.4), (-0.62, -0.7), (-2, 0.5), (1.7, 1.2), (2.0, 1.4), (2.4, 1.7)]
plt.clabel(CS, inline=1, fontsize=10, manual=manual_locations)
plt.title('labels at selected locations')
# You can force all the contours to be the same color.
plt.figure()
CS = plt.contour(X, Y, Z, 6,
colors='k', # negative contours will be dashed by default
)
plt.clabel(CS, fontsize=9, inline=1)
plt.title('Single color - negative contours dashed')
# You can set negative contours to be solid instead of dashed:
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
plt.figure()
CS = plt.contour(X, Y, Z, 6,
colors='k', # negative contours will be dashed by default
)
plt.clabel(CS, fontsize=9, inline=1)
plt.title('Single color - negative contours solid')
# And you can manually specify the colors of the contour
plt.figure()
CS = plt.contour(X, Y, Z, 6,
linewidths=np.arange(.5, 4, .5),
colors=('r', 'green', 'blue', (1,1,0), '#afeeee', '0.5')
)
plt.clabel(CS, fontsize=9, inline=1)
plt.title('Crazy lines')
# Or you can use a colormap to specify the colors; the default
# colormap will be used for the contour lines
plt.figure()
im = plt.imshow(Z, interpolation='bilinear', origin='lower',
cmap=cm.gray, extent=(-3,3,-2,2))
levels = np.arange(-1.2, 1.6, 0.2)
CS = plt.contour(Z, levels,
origin='lower',
linewidths=2,
extent=(-3,3,-2,2))
#Thicken the zero contour.
zc = CS.collections[6]
plt.setp(zc, linewidth=4)
plt.clabel(CS, levels[1::2], # label every second level
inline=1,
fmt='%1.1f',
fontsize=14)
# make a colorbar for the contour lines
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.title('Lines with colorbar')
#plt.hot() # Now change the colormap for the contour lines and colorbar
plt.flag()
# We can still add a colorbar for the image, too.
CBI = plt.colorbar(im, orientation='horizontal', shrink=0.8)
# This makes the original colorbar look a bit out of place,
# so let's improve its position.
l,b,w,h = plt.gca().get_position().bounds
ll,bb,ww,hh = CB.ax.get_position().bounds
CB.ax.set_position([ll, b+0.1*h, ww, h*0.8])
plt.show()
| gpl-3.0 |
toastedcornflakes/scikit-learn | examples/mixture/plot_gmm.py | 18 | 3038 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are less examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
splot = plt.subplot(2, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
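        # linalg.eigh gives the eigenvalues v and eigenvectors w of the
        # covariance; 2 * sqrt(2) * sqrt(v) converts the eigenvalues into the
        # ellipse axis lengths and w[0] sets its orientation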
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10., 10.)
plt.ylim(-3., 6.)
plt.xticks(())
plt.yticks(())
plt.title(title)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Gaussian Mixture')
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm._get_covars(), 1,
'Dirichlet Process GMM')
plt.show()
| bsd-3-clause |
costypetrisor/scikit-learn | sklearn/utils/tests/test_validation.py | 133 | 18339 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
    # don't allow ndim > 2
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
    # But this works if the input data is forced to look like a 2D array with
# one sample and one feature:
X_checked = check_array(42, ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
    # Simulate a case where a pipeline stage has trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
    # Check that ValueError is raised when a non-estimator instance is passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
containers-ftw/cftw | cftw/bases/templates/bases/continuumio-anaconda3/analysis/helpers/results.py | 2 | 2636 | '''
results.py: container competition results helper functions
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from .logger import bot
import pandas
import os
RESULTBASE = os.environ.get("CONTAINERSFTW_RESULT",'/code/results')
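# Result files are written under RESULTBASE; the location can be overridden at
# runtime through the CONTAINERSFTW_RESULT environment variable.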
def save_result(result,result_type=None,sep=None,result_base=None):
'''save a result required for the competition'''
if result_type is None:
result_type = "submission"
if sep is None:
sep = ','
filename = get_resultfile(name=result_type,
result_base=result_base)
if filename is not None:
if isinstance(result,pandas.DataFrame):
result.to_csv(filename,index=False,sep=sep)
else:
bot.error("%s is not a valid result type." %result_type)
return filename
def list_results(result_base=None):
'''list the results that a complete model must provide'''
if result_base is None:
result_base = RESULTBASE
# TODO: this should also be generated from a json that is derived automatically
results = {'submission':'%s/submission.csv' %result_base}
for title,path in results.items():
bot.debug("%s : %s" %(title,path))
return results
def get_resultfile(name=None,result_base=None):
'''get_resultfile returns the file for the result, or None if
not defined
'''
results = list_results(result_base)
if name is not None:
name = os.path.splitext(name)[0].lower()
if name in results:
return results[name]
bot.info("Results required include: %s" %(','.join(list(results.keys()))))
return None
| mit |
kezilu/pextant | pextant/casestudy/idaho.py | 2 | 3041 | from pextant.api_future import *
from pextant.EnvironmentalModel import *
from osgeo import gdal
dem_path = 'maps/hwmidlow.tif'
import pandas as pd
import json
pd.options.display.max_rows = 5
with open('waypoints/MD10_EVA10_Stn18_Stn23_X.json') as data_file:
data = json.load(data_file)
ways_and_segments = data['sequence']
s = pd.DataFrame(ways_and_segments)
waypoints = s[s['type']=='Station']['geometry']
w = waypoints.values.tolist()
latlongFull = pd.DataFrame(w)
latlongInter = latlongFull['coordinates'].values.tolist()
latlong = pd.DataFrame(latlongInter, columns=['longitude','latitude'])
latlongcoord = LatLongCoord(latlong['latitude'].values,latlong['longitude'].values)
utm = latLongToUTM(latlongcoord)
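# UTM bounding box of the waypoints; the NW/SE corners derived below are used to
# crop the elevation model so only the traversed area is loaded.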
utmmaxx, utmminx = utm.easting.max(), utm.easting.min()
utmmaxy, utmminy = utm.northing.max(), utm.northing.min()
NWCorner = UTMCoord(utmminx, utmmaxy, utm.zone)
SECorner = UTMCoord(utmmaxx, utmminy, utm.zone)
print(UTMToLatLong(NWCorner))
print(UTMToLatLong(SECorner))
dem_map = loadElevationMap(dem_path, maxSlope=25, nw_corner=NWCorner, se_corner=SECorner)
astronaut = Astronaut(70)
P = Pathfinder(astronaut, dem_map)
lat,lon = latlong[['latitude','longitude']].iloc[7]
print(lat,lon)
latlong0 = LatLongCoord(lat, lon);
utm0 = latLongToUTM(latlong0)
ap0 = ActivityPoint(latlong0, 0)
row0, col0 = dem_map.convertToRowCol(utm0)
print(row0,col0)
lat,lon = latlong[['latitude','longitude']].iloc[8]
print(lat,lon)
latlong1 = LatLongCoord(lat, lon);
utm1 = latLongToUTM(latlong1)
ap1 = ActivityPoint(latlong1, 0)
row1, col1 = dem_map.convertToRowCol(utm1)
print(row1,col1)
lat,lon = latlong[['latitude','longitude']].iloc[9]
latlong2 = LatLongCoord(lat, lon);
utm2 = latLongToUTM(latlong2)
ap2 = ActivityPoint(latlong2, 0)
print(lat,lon)
ap2 = ActivityPoint(LatLongCoord(lat, lon), 0)
row2, col2 = dem_map.convertToRowCol(utm2)
print(row2,col2)
from bokeh.plotting import figure, output_file, show
from bokeh.io import hplot
output_file("lines.html", title="line plot example")
dh, dw = dem_map.elevations.shape
print(dw, dh)
# create image figures for the elevation and obstacle maps
s1 = figure(title="Elevation map", x_axis_label='x', y_axis_label='y', x_range=[0, 250], y_range=[250, 500])
s2 = figure(title="Obstacle map", x_axis_label='x', y_axis_label='y', x_range=[0, 250], y_range=[250, 500])
# add image renderers for the elevation and obstacle rasters
s1.image(image=[dem_map.elevations[::-1,:]], dw=dw, dh=dh, palette="Spectral11")
s2.image(image=[dem_map.obstacles[::-1,:]], dw=dw, dh=dh)
# compute the A* path through the activity points and overlay it on both plots
final = P.aStarCompletePath([0, 0, 1], [ap1, ap2], 'tuple', [s1, s2], dh)
if len(final)>0:
for elt in final[0]:
s1.circle(elt[1], dh - elt[0], fill_color="yellow", line_color="yellow")
s2.circle(elt[1], dh-elt[0], fill_color="yellow", line_color="yellow")
s1.circle([col1, col2], [dh - row1, dh - row2], fill_color="orange", line_color="orange")
s2.circle([col1, col2], [dh - row1, dh - row2], fill_color="orange", line_color="orange")
print(final)
p = hplot(s1, s2)
show(p) | mit |
andreashorn/lead_dbs | ext_libs/OSS-DBS/OSS_platform/Electrode_files/Medtronic3389_profile.py | 1 | 29157 | # -*- coding: utf-8 -*-
###
### This file is generated automatically by SALOME v8.3.0 with dump python functionality
###### Run with DBS_lead_position_V9.py
import sys
import salome
salome.salome_init()
theStudy = salome.myStudy
import salome_notebook
notebook = salome_notebook.NoteBook(theStudy)
sys.path.insert( 0, r'/home/trieu/electrode_dir')
###
### GEOM component
###
########################################### extra code 1 V10 15/12/18#############################################
###### This file runs with DBS_lead_position_V10.py
import os
sys.path.insert( 0, r'{}'.format(os.getcwd()))
sys.path.append('/usr/local/lib/python2.7/dist-packages')
#from pandas import read_csv
##### DEFAULT LIST #####
#Lead2nd_Enable = True
#Xt = 0
#Yt = 5
#Zt = 0
#X_2nd = 0
#Y_2nd = 5
#Z_2nd = 0
#OZ_angle = 0
#Xm = 0
#Ym = 0
#Zm = 0
#encap_thickness = 0.1
#ROI_radial = 13
#Vertice_enable = False
#Brain_map = '/home/trieu/electrode_dir/brain_elipse.brep'
#if(Lead2nd_Enable):
# Xt2 = 0
# Yt2 = -5
# Zt2 = 0
# OX_angle2 = 0
# OY_angle2 = 0
# OZ_angle2 = 0
##### VARIABLE LIST #####
########## End of variable list#############
if Z_2nd == Zt:
Z_2nd_artif = Zt+1.0 # just to ensure the rotation is possible
else:
Z_2nd_artif=Z_2nd
#for Lead-DBS, the tip point should be shifted down (they use the middle of the lowest contact as the reference point)
Zt_tip=Zt-2.25 #for Medtronic3389
Vert_array =[0];
number_vertex = len(Vert_array)
Vert = []
VolumeObject1 = []
ContactObject1 = []
VolumeObject2 = []
ContactObject2 = []
print " DBS_lead's Geometry buid\n"
######################################### end of extra code 1 ########################################
######################################################################################################
from salome.geom import geomBuilder
import math
import SALOMEDS
geompy = geomBuilder.New(theStudy)
O = geompy.MakeVertex(0, 0, 0)
OX = geompy.MakeVectorDXDYDZ(1, 0, 0)
OY = geompy.MakeVectorDXDYDZ(0, 1, 0)
OZ = geompy.MakeVectorDXDYDZ(0, 0, 1)
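# The block below builds the lead geometry: the four ring contacts (Contact_1..Contact_4),
# the lead body (a cylinder fused with a spherical tip), and the encapsulation layer
# around it; contact heights and offsets along OZ are scaled by the externally set 'stretch'.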
Circle_1 = geompy.MakeCircle(O, OZ, 0.635)
Contact_1 = geompy.MakePrismVecH(Circle_1, OZ, 0.75*stretch+0.75)
geompy.TranslateDXDYDZ(Contact_1, 0, 0, 0.865)
Contact_1_full = geompy.MakePrismVecH(Circle_1, OZ, 1.5*stretch)
geompy.TranslateDXDYDZ(Contact_1_full, 0, 0, 0.865)
Contact_2 = geompy.MakeTranslation(Contact_1_full, 0, 0, 1.25*stretch+0.75)
Contact_3 = geompy.MakeTranslation(Contact_1_full, 0, 0, 3.25*stretch+0.75)
Contact_4 = geompy.MakeTranslation(Contact_1, 0, 0, 5.25*stretch+0.75)
Cylinder_1 = geompy.MakeCylinderRH(0.635, 149.365)
Sphere_1 = geompy.MakeSphereR(0.635)
Fuse_1 = geompy.MakeFuseList([Cylinder_1, Sphere_1], True, True)
Cylinder_2 = geompy.MakeCylinderRH(encap_thickness+0.635, 149.365)
Sphere_2 = geompy.MakeSphereR(encap_thickness+0.635)
Fuse_2 = geompy.MakeFuseList([Cylinder_2, Sphere_2], True, True)
encap_layer = geompy.MakeCutList(Fuse_2, [Fuse_1], True)
geompy.TranslateDXDYDZ(Circle_1, 0, 0, 0.635)
geompy.TranslateDXDYDZ(Contact_1, 0, 0, 0.635)
geompy.TranslateDXDYDZ(Contact_2, 0, 0, 0.635)
geompy.TranslateDXDYDZ(Contact_3, 0, 0, 0.635)
geompy.TranslateDXDYDZ(Contact_4, 0, 0, 0.635)
geompy.TranslateDXDYDZ(Cylinder_1, 0, 0, 0.635)
geompy.TranslateDXDYDZ(Sphere_1, 0, 0, 0.635)
geompy.TranslateDXDYDZ(Fuse_1, 0, 0, 0.635)
geompy.TranslateDXDYDZ(Cylinder_2, 0, 0, 0.635)
geompy.TranslateDXDYDZ(Sphere_2, 0, 0, 0.635)
geompy.TranslateDXDYDZ(Fuse_2, 0, 0, 0.635)
geompy.TranslateDXDYDZ(encap_layer, 0, 0, 0.635)
Sphere_ROI = geompy.MakeSphereR(ROI_radial)
encap_outer_ROI = geompy.MakeCutList(encap_layer, [Sphere_ROI], True)
encap_inner_ROI = geompy.MakeCutList(encap_layer, [encap_outer_ROI], True)
Fuse_all_lead_encap_ROI = geompy.MakeFuseList([Sphere_ROI, Fuse_2], True, True)
ROI = geompy.MakeCutList(Sphere_ROI, [Fuse_2], True)
CV1 = geompy.MakeCylinderRH(0.635, 0.75*stretch+0.75)
geompy.TranslateDXDYDZ(CV1, 0, 0, 1.5)
CV1_full = geompy.MakeCylinderRH(0.635, 1.5*stretch)
geompy.TranslateDXDYDZ(CV1_full, 0, 0, 1.5)
CV2 = geompy.MakeTranslation(CV1_full, 0, 0, 0.75+1.25*stretch)
CV3 = geompy.MakeTranslation(CV1_full, 0, 0, 3.25*stretch+0.75)
CV4 = geompy.MakeTranslation(CV1, 0, 0, 5.25*stretch+0.75)
##################################################################################################################
########################################### extra code 2 V10 15/12/18#############################################
print " Load brain image \n"
if (Brain_map[-4:] == 'brep'):
brain_solid = geompy.ImportBREP( Brain_map )
elif (Brain_map[-4:] == 'step'):
brain_solid = geompy.ImportSTEP( Brain_map )
elif (Brain_map[-4:] == 'iges'):
brain_solid = geompy.ImportIGES( Brain_map )
elif (Brain_map[-4:] == '.stl'):
brain_solid = geompy.ImportSTL( Brain_map )
else:
print " unknow imported file format"
Fuse_all_lead_encap_ROI_no_internal_face = geompy.RemoveInternalFaces(Fuse_all_lead_encap_ROI)
#################################################### Geometry and extra code interface ##############################################################
VolumeObject1 = [ encap_outer_ROI,ROI,encap_inner_ROI,CV1,CV2,CV3,CV4] # Declare objects included to partition, encap_outer_ROI always @1st position
Volume_name1 = ['encap_outer_ROI1','ROI1','encap_inner_ROI1','CV1_1','CV1_2','CV1_3','CV1_4'] # Declare name of the group in the partition for volume
ContactObject1 = [Contact_1,Contact_2,Contact_3,Contact_4]
Contact_name1 = ['Contact1_1','Contact1_2','Contact1_3','Contact1_4']
if(Lead2nd_Enable): ################## 2nd LEAD ###############################################
VolumeObject2 = [ROI]*len(VolumeObject1)
ContactObject2 = [Contact_1]*len(ContactObject1)
Volume_name2 = [ 'encap_outer_ROI2','ROI2','encap_inner_ROI2','CV2_1','CV2_2','CV2_3','CV2_4']
Contact_name2 = ['Contact2_1','Contact2_2','Contact2_3','Contact2_4']
##############################################################################################################################################
print "Position 2nd Fuse all object at [{},{},{}], [{}',{}',{}']\n".format(Xt2,Yt2,Zt2,OX_angle2,OY_angle2,OZ_angle2)
Fuse_all_lead_encap_ROI_no_internal_face2 = geompy.MakeTranslation(Fuse_all_lead_encap_ROI_no_internal_face,Xt2,Yt2,Zt2)
OX2 = geompy.MakeTranslation(OX,Xt2,Yt2,Zt2)
OY2 = geompy.MakeTranslation(OY,Xt2,Yt2,Zt2)
OZ2 = geompy.MakeTranslation(OZ,Xt2,Yt2,Zt2)
geompy.Rotate(Fuse_all_lead_encap_ROI_no_internal_face2, OX2,OX_angle2*math.pi/180.0)
geompy.Rotate(Fuse_all_lead_encap_ROI_no_internal_face2, OY2,OY_angle2*math.pi/180.0)
geompy.Rotate(Fuse_all_lead_encap_ROI_no_internal_face2, OZ2,OZ_angle2*math.pi/180.0)
print "Position 2nd Lead at [{},{},{}], [{}',{}',{}']\n".format(Xt2,Yt2,Zt2,OX_angle2,OY_angle2,OZ_angle2)
for i in range(0,len(VolumeObject1)):
VolumeObject2[i] = geompy.MakeTranslation(VolumeObject1[i],Xt2,Yt2,Zt2)
geompy.Rotate(VolumeObject2[i], OX2,OX_angle2*math.pi/180.0)
geompy.Rotate(VolumeObject2[i], OY2,OY_angle2*math.pi/180.0)
geompy.Rotate(VolumeObject2[i], OZ2,OZ_angle2*math.pi/180.0)
for i in range(0,len(ContactObject1)):
ContactObject2[i] = geompy.MakeTranslation(ContactObject1[i],Xt2,Yt2,Zt2)
geompy.Rotate(ContactObject2[i], OX2,OX_angle2*math.pi/180.0)
geompy.Rotate(ContactObject2[i], OY2,OY_angle2*math.pi/180.0)
geompy.Rotate(ContactObject2[i], OZ2,OZ_angle2*math.pi/180.0)
print "Cut outer ROI2 with brain\n"
cut_outer_ROI = geompy.MakeCutList(VolumeObject2[0], [brain_solid], True)
VolumeObject2[0] = geompy.MakeCutList(VolumeObject2[0], [cut_outer_ROI], True)
print "Cut ROI2 with brain\n"
VolumeObject2[1] = geompy.MakeCommonList([VolumeObject2[1], brain_solid], True)
print "Group 2nd:volume and area extraction for group ID identification process\n"
Volume2_Pro = [geompy.BasicProperties( VolumeObject2[0])]*len(VolumeObject2)
Contact2_Pro = [geompy.BasicProperties( ContactObject2[0])]*len(ContactObject2)
for i in range(0,len(VolumeObject2)):
Volume2_Pro[i] = geompy.BasicProperties( VolumeObject2[i])
for i in range(0,len(ContactObject2)):
Contact2_Pro[i] = geompy.BasicProperties( ContactObject2[i])
################## LEAD 1st #############################################################
#print "Position 1st Fuse all object at [{},{},{}], [{}',{}',{}']\n".format(Xt,Yt,Zt,OX_angle,OY_angle,OZ_angle)
geompy.TranslateDXDYDZ(Fuse_all_lead_encap_ROI_no_internal_face,Xt,Yt,Zt_tip)
OX1 = geompy.MakeTranslation(OX,Xt,Yt,Zt_tip)
OY1 = geompy.MakeTranslation(OY,Xt,Yt,Zt_tip)
OZ1 = geompy.MakeTranslation(OZ,Xt,Yt,Zt_tip)
geompy.Rotate(Fuse_all_lead_encap_ROI_no_internal_face, OZ1,OZ_angle*math.pi/180.0)
Vertex_1 = geompy.MakeVertex(X_2nd,Y_2nd,Z_2nd)
Vertex_O = geompy.MakeVertex(Xt,Yt,Zt)
Vertex_3 = geompy.MakeVertex(Xt,Yt,Z_2nd_artif)
if X_2nd!=Xt or Y_2nd!=Yt:
Fuse_all_lead_encap_ROI_no_internal_face=geompy.MakeRotationThreePoints(Fuse_all_lead_encap_ROI_no_internal_face, Vertex_O, Vertex_3, Vertex_1)
#print "Position 1st Lead at [{},{},{}], [{}',{}',{}']\n".format(Xt,Yt,Zt,OX_angle,OY_angle,OZ_angle)
for i in range(0,len(VolumeObject1)):
geompy.TranslateDXDYDZ(VolumeObject1[i],Xt,Yt,Zt_tip)
geompy.Rotate(VolumeObject1[i], OZ1,OZ_angle*math.pi/180.0)
if X_2nd!=Xt or Y_2nd!=Yt:
VolumeObject1[i]=geompy.MakeRotationThreePoints(VolumeObject1[i], Vertex_O, Vertex_3, Vertex_1)
for i in range(0,len(ContactObject1)):
geompy.TranslateDXDYDZ(ContactObject1[i],Xt,Yt,Zt_tip)
geompy.Rotate(ContactObject1[i], OZ1,OZ_angle*math.pi/180.0)
if X_2nd!=Xt or Y_2nd!=Yt:
ContactObject1[i]=geompy.MakeRotationThreePoints(ContactObject1[i], Vertex_O, Vertex_3, Vertex_1)
print "Cut outer ROI1 with brain\n"
cut_outer_ROI = geompy.MakeCutList(VolumeObject1[0], [brain_solid], True)
VolumeObject1[0] = geompy.MakeCutList(VolumeObject1[0], [cut_outer_ROI], True)
print "Cut ROI1 with brain\n"
VolumeObject1[1] = geompy.MakeCommonList([VolumeObject1[1], brain_solid], True)
print "Group 1st:volume and area extraction for group ID identification process\n"
Volume1_Pro = [geompy.BasicProperties( VolumeObject1[0])]*len(VolumeObject1)
Contact1_Pro = [geompy.BasicProperties( ContactObject1[0])]*len(ContactObject1)
for i in range(0,len(VolumeObject1)):
Volume1_Pro[i] = geompy.BasicProperties( VolumeObject1[i])
for i in range(0,len(ContactObject1)):
Contact1_Pro[i] = geompy.BasicProperties( ContactObject1[i])
print "Create reference groups for ID identification process\n"
if(Lead2nd_Enable):
Rest = geompy.MakeCutList(brain_solid, [Fuse_all_lead_encap_ROI_no_internal_face,Fuse_all_lead_encap_ROI_no_internal_face2], True)
Partition_profile = geompy.MakePartition(VolumeObject1+VolumeObject2+ContactObject1+ContactObject2, [], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
###reference_volume
reference_volume = VolumeObject1 + VolumeObject2
reference_volume_Pro = Volume1_Pro + Volume2_Pro
Volume_name = Volume_name1+Volume_name2
### reference_area
reference_surface = ContactObject1 + ContactObject2
reference_surface_Pro = Contact1_Pro + Contact2_Pro
Contact_name = Contact_name1+Contact_name2
Group_volume = [geompy.CreateGroup(Partition_profile, geompy.ShapeType["SOLID"])] * (len(VolumeObject1)+len(VolumeObject2)+1) # +1 is Rest Group
Group_surface = [geompy.CreateGroup(Partition_profile, geompy.ShapeType["FACE"])] * (len(ContactObject1)+len(ContactObject2))
else:
Rest = geompy.MakeCutList(brain_solid, [Fuse_all_lead_encap_ROI_no_internal_face], True)
Partition_profile = geompy.MakePartition(VolumeObject1+ContactObject1, [], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
###reference_volume
reference_volume = VolumeObject1
reference_volume_Pro = Volume1_Pro
Volume_name = Volume_name1
### reference_area
reference_surface = ContactObject1
reference_surface_Pro = Contact1_Pro
Contact_name = Contact_name1
Group_volume = [geompy.CreateGroup(Partition_profile, geompy.ShapeType["SOLID"])] * (len(VolumeObject1)+1) # +1 is Rest Group
Group_surface = [geompy.CreateGroup(Partition_profile, geompy.ShapeType["FACE"])] * len(ContactObject1)
### find out subshape and subshape ID
Group_surface_ListIDs =[]
Group_volume_ListIDs =[]
Group_partition_volume = []
Group_partition_surface = []
### find group volume ID ######################################################################
Partition_volume_IDsList = geompy.SubShapeAllIDs(Partition_profile, geompy.ShapeType["SOLID"]) # list all sub shape volume in Partition
print "Partition_volume_IDsList",Partition_volume_IDsList, '\n'
for ref_ind in range (0, len(reference_volume)):
temp_volume = []
for sub_ind in range (0, len (Partition_volume_IDsList)):
subshape = geompy.GetSubShape(Partition_profile, [Partition_volume_IDsList[sub_ind]]) # get subshape
subshape_Pro = geompy.BasicProperties(subshape) # extract volume of subshape
Common_volume = geompy.MakeCommonList([subshape, reference_volume[ref_ind]], True) # check common intersection
Common_volume_Pro = geompy.BasicProperties(Common_volume)
print "volume difference",abs(Common_volume_Pro[2]-subshape_Pro[2]),"/",abs(Common_volume_Pro[2]-reference_volume_Pro[ref_ind][2])
# if ( common volume = subshape) and (common volume = ref volume) => ref volume = sub shape
if (abs(Common_volume_Pro[2]-subshape_Pro[2])< 0.0003) and (abs(Common_volume_Pro[2]-reference_volume_Pro[ref_ind][2])<0.0003):
Group_partition_volume.append([Volume_name[ref_ind],Partition_volume_IDsList[sub_ind]])
# if ( common volume = subshape) and (common volume < ref volume) => sub shape belong to ref volume
elif (abs(Common_volume_Pro[2]-subshape_Pro[2])< 0.0003) and ((Common_volume_Pro[2] - reference_volume_Pro[ref_ind][2])<-0.0003):
temp_volume.append( Partition_volume_IDsList[sub_ind] )
    if len(temp_volume) >1 : # the volume is divided
        Group_partition_volume.append([Volume_name[ref_ind],temp_volume ])
        print Volume_name[ref_ind]," is divided and has sub IDs:{}\n".format(temp_volume)
if len(reference_volume) != len(Group_partition_volume):
print "Geometry-volume error please check ROI diameter and DBS lead Position ",len(reference_volume),len(Group_partition_volume)
print 'Group_partition_volume',Group_partition_volume,'\n'
### find group surface ID ######################################################################
Partition_surface_IDsList = geompy.SubShapeAllIDs(Partition_profile, geompy.ShapeType["FACE"]) # list all sub shape face in Partition
print 'Partition_surface_IDsList',Partition_surface_IDsList,'\n'
sub_face = [] ## store divided faces
for reff_ind in range (0, len (reference_surface)):
temp_surface = []
for subf_ind in range (0, len(Partition_surface_IDsList)):
subshapef = geompy.GetSubShape(Partition_profile, [Partition_surface_IDsList[subf_ind]]) # get subshape
Common_face = geompy.MakeCommonList([subshapef, reference_surface[reff_ind]], True) # check common intersection
Common_face_Pro = geompy.BasicProperties(Common_face)
subshapef_Pro = geompy.BasicProperties(subshapef) # extract volume of subshape
print "area difference",abs(Common_face_Pro[1]-subshapef_Pro[1]),"/",abs(Common_face_Pro[1]-reference_surface_Pro[reff_ind][1])
# if ( common face = subface) and (common face = ref face) => ref face = sub face
if (abs(Common_face_Pro[1]-subshapef_Pro[1])<0.000001 )and (abs(Common_face_Pro[1]-reference_surface_Pro[reff_ind][1])<0.000001):
Group_partition_surface.append([ Contact_name[reff_ind],Partition_surface_IDsList[subf_ind] ])
# if ( common face = subface) and (common face < ref face) => sub face belong to ref face
elif (abs(Common_face_Pro[1]-subshapef_Pro[1])<0.000001 ) and ((Common_face_Pro[1] - reference_surface_Pro[reff_ind][1])<-0.000001):
temp_surface.append(Partition_surface_IDsList[subf_ind])
    if len(temp_surface) >1 : # the face is divided
        Group_partition_surface.append( [Contact_name[reff_ind],temp_surface ])
        print Contact_name[reff_ind]," is divided and has sub IDs:{}\n".format(temp_surface)
if len(reference_surface) != len(Group_partition_surface): #+len(Group_partition_Multi_surface):
print "Geometry-Surface error please check ROI diameter and DBS lead Position ",len(reference_surface),len(Group_partition_surface),'\n'
print 'Group_partition_surface',Group_partition_surface,'\n'
if(Lead2nd_Enable):
Partition_profile = geompy.MakePartition(VolumeObject1+VolumeObject2+ContactObject1+ContactObject2+[Rest], [], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
else:
Partition_profile = geompy.MakePartition(VolumeObject1+ContactObject1+[Rest], [], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
new_volume_ID= geompy.SubShapeAllIDs(Partition_profile, geompy.ShapeType["SOLID"])
ID= list(set(Partition_volume_IDsList) ^ set (new_volume_ID))
Group_partition_volume.append(['Rest_1',ID[0]])
print "REST ID:",ID
print 'Group_partition_volume',Group_partition_volume,'\n'
print"Create volume and surface group under partition_profile\n"
for i_solid in range (0,len (Group_partition_volume)):
Group_volume[i_solid] = geompy.CreateGroup(Partition_profile, geompy.ShapeType["SOLID"])
if (isinstance (Group_partition_volume[i_solid][1],list) == False):
geompy.UnionIDs(Group_volume[i_solid], [Group_partition_volume[i_solid][1]])
if (isinstance (Group_partition_volume[i_solid][1],list) == True):
geompy.UnionIDs(Group_volume[i_solid], Group_partition_volume[i_solid][1])
#############################################
for i_surface in range (0,len (Group_partition_surface)):
Group_surface[i_surface] = geompy.CreateGroup(Partition_profile, geompy.ShapeType["FACE"])
if (isinstance (Group_partition_surface[i_surface][1],list) == False): # not a list
geompy.UnionIDs(Group_surface[i_surface], [Group_partition_surface[i_surface][1]])
if (isinstance (Group_partition_surface[i_surface][1],list) == True): # it is a list
geompy.UnionIDs(Group_surface[i_surface], Group_partition_surface[i_surface][1])
print "Translate whole partition to Xm,Ym,Zm\n"
geompy.TranslateDXDYDZ(Partition_profile, Xm, Ym, Zm)
### add Vertices to geometry
if(Vertice_enable):
for ver_ind in range (0,number_vertex):
print"Add vertices to model\n"
Vert.append(geompy.MakeVertex(Vert_array[ver_ind][0],Vert_array[ver_ind][1],Vert_array[ver_ind][2]))
geompy.TranslateDXDYDZ(Vert[ver_ind], Xm, Ym, Zm) ###Translate vertices to Xm,Ym,Zm
geompy.addToStudy( Vert[ver_ind], 'Vert_{}'.format(ver_ind))
print"add to study\n"
############################################ end of extra code 2 ############################################
#############################################################################################################
geompy.addToStudy( O, 'O' )
geompy.addToStudy( OX, 'OX' )
geompy.addToStudy( OY, 'OY' )
geompy.addToStudy( OZ, 'OZ' )
#geompy.addToStudy( Circle_1, 'Circle_1' )
geompy.addToStudy( Contact_1, 'Contact_1' )
geompy.addToStudy( Contact_2, 'Contact_2' )
geompy.addToStudy( Contact_3, 'Contact_3' )
geompy.addToStudy( Contact_4, 'Contact_4' )
geompy.addToStudy( CV1, 'CV1' )
geompy.addToStudy( CV2, 'CV2' )
geompy.addToStudy( CV3, 'CV3' )
geompy.addToStudy( CV4, 'CV4' )
#geompy.addToStudy( Cylinder_1, 'Cylinder_1' )
#geompy.addToStudy( Sphere_1, 'Sphere_1' )
#geompy.addToStudy( Fuse_1, 'Fuse_1' )
#geompy.addToStudy( Cylinder_2, 'Cylinder_2' )
#geompy.addToStudy( Sphere_2, 'Sphere_2' )
#geompy.addToStudy( Fuse_2, 'Fuse_2' )
#geompy.addToStudy( encap_layer, 'encap_layer' )
#geompy.addToStudy( Sphere_ROI, 'Sphere_ROI' )
geompy.addToStudy( ROI, 'ROI' )
geompy.addToStudy( encap_outer_ROI, 'encap_outer_ROI' )
geompy.addToStudy( encap_inner_ROI, 'encap_inner_ROI' )
geompy.addToStudy( Fuse_all_lead_encap_ROI, 'Fuse_all_lead_encap_ROI' )
################################################################################################################
####################################### extra code 3 V10 15/12/18##############################################/
#for i in range(0,len(VolumeObject2)):/
# geompy.addToStudy( VolumeObject2[i], 'VolumeObject2_{}'.format(i) )
#for i in range(0,len(ContactObject2)):
# geompy.addToStudy( ContactObject2[i], 'ContactObject2_{}'.format(i) )
#for i in range(0,len(VolumeObject1)):
# geompy.addToStudy( VolumeObject1[i], 'VolumeObject1_{}'.format(i) )
#for i in range(0,len(ContactObject1)):
# geompy.addToStudy( ContactObject1[i], 'ContactObject1_{}'.format(i) )
geompy.addToStudy( Partition_profile, 'Partition_profile' )
for i_solid1 in range (0,len (Group_partition_volume)):
geompy.addToStudyInFather( Partition_profile, Group_volume [i_solid1], Group_partition_volume[i_solid1][0])
for i_surface1 in range (0,len (Group_partition_surface)):
geompy.addToStudyInFather( Partition_profile, Group_surface [i_surface1], Group_partition_surface[i_surface1][0])
##################################### end of extra code 3##########################################
###################################################################################################
Contact1_1=Group_surface[0]
Contact1_2=Group_surface[1]
Contact1_3=Group_surface[2]
Contact1_4=Group_surface[3]
encap_inner_ROI1=Group_volume[2]
encap_outer_ROI1=Group_volume[0]
ROI1=Group_volume[1]
Rest_1=Group_volume[7]
Floating_contacts=[]
float_indices=[]
for i in xrange(len(Phi_vector)):
if Phi_vector[i]==None:
Floating_contacts.append(Group_volume[i+3]) #because the first contact is Group_volume[3]
float_indices.append(i+3)
Auto_group_for_floating = geompy.CreateGroup(Partition_profile, geompy.ShapeType["SOLID"])
geompy.UnionList(Auto_group_for_floating, Floating_contacts[:])
geompy.addToStudyInFather( Partition_profile, Auto_group_for_floating, 'Auto_group_for_floating' )
###
### SMESH component
###
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(theStudy)
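# Meshing strategy: a global NETGEN 1D-2D-3D tetrahedral mesh over the whole partition,
# finer NETGEN 1D-2D surface meshes on the four contact faces, and separate volume
# hypotheses for the encapsulation layers, the ROI, the floating contacts and the rest;
# the sub-mesh priority is set explicitly with SetMeshOrder before Compute().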
Mesh_1 = smesh.Mesh(Partition_profile)
NETGEN_1D_2D_3D = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D)
NETGEN_3D_Parameters_1 = NETGEN_1D_2D_3D.Parameters()
NETGEN_3D_Parameters_1.SetMaxSize( 25.4615 )
NETGEN_3D_Parameters_1.SetSecondOrder( 0 )
NETGEN_3D_Parameters_1.SetOptimize( 1 )
NETGEN_3D_Parameters_1.SetFineness( 0 )
NETGEN_3D_Parameters_1.SetMinSize( 0.000374134 )
NETGEN_3D_Parameters_1.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_1.SetFuseEdges( 1 )
NETGEN_3D_Parameters_1.SetQuadAllowed( 0 )
NETGEN_1D_2D = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_1)
Sub_mesh_1 = NETGEN_1D_2D.GetSubMesh()
NETGEN_2D_Parameters_1 = NETGEN_1D_2D.Parameters()
NETGEN_2D_Parameters_1.SetMaxSize( 0.05 )
NETGEN_2D_Parameters_1.SetSecondOrder( 0 )
NETGEN_2D_Parameters_1.SetOptimize( 1 )
NETGEN_2D_Parameters_1.SetFineness( 4 )
NETGEN_2D_Parameters_1.SetMinSize( 0.0001 )
NETGEN_2D_Parameters_1.SetUseSurfaceCurvature( 1 )
NETGEN_2D_Parameters_1.SetFuseEdges( 1 )
NETGEN_2D_Parameters_1.SetQuadAllowed( 0 )
NETGEN_1D_2D_1 = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_2)
Sub_mesh_2 = NETGEN_1D_2D_1.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_2D_Parameters_1,Contact1_2)
NETGEN_1D_2D_2 = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_3)
Sub_mesh_3 = NETGEN_1D_2D_2.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_2D_Parameters_1,Contact1_3)
NETGEN_1D_2D_3 = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_4)
Sub_mesh_4 = NETGEN_1D_2D_3.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_2D_Parameters_1,Contact1_4)
NETGEN_1D_2D_3D_1 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=encap_inner_ROI1)
Sub_mesh_5 = NETGEN_1D_2D_3D_1.GetSubMesh()
NETGEN_3D_Parameters_2 = NETGEN_1D_2D_3D_1.Parameters()
NETGEN_3D_Parameters_2.SetMaxSize( encap_thickness )
NETGEN_3D_Parameters_2.SetSecondOrder( 0 )
NETGEN_3D_Parameters_2.SetOptimize( 1 )
NETGEN_3D_Parameters_2.SetFineness( 2 )
NETGEN_3D_Parameters_2.SetMinSize( 0.00283583 )
NETGEN_3D_Parameters_2.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_2.SetFuseEdges( 1 )
NETGEN_3D_Parameters_2.SetQuadAllowed( 0 )
isDone = Mesh_1.SetMeshOrder( [ [ Sub_mesh_4, Sub_mesh_3, Sub_mesh_2, Sub_mesh_1, Sub_mesh_5 ] ])
NETGEN_1D_2D_3D_2 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=encap_outer_ROI1)
Sub_mesh_6 = NETGEN_1D_2D_3D_2.GetSubMesh()
NETGEN_3D_Parameters_3 = NETGEN_1D_2D_3D_2.Parameters()
NETGEN_3D_Parameters_3.SetMaxSize( encap_thickness )
NETGEN_3D_Parameters_3.SetSecondOrder( 0 )
NETGEN_3D_Parameters_3.SetOptimize( 1 )
NETGEN_3D_Parameters_3.SetFineness( 2 )
NETGEN_3D_Parameters_3.SetMinSize( 0.0333798 )
NETGEN_3D_Parameters_3.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_3.SetFuseEdges( 1 )
NETGEN_3D_Parameters_3.SetQuadAllowed( 0 )
isDone = Mesh_1.SetMeshOrder( [ [ Sub_mesh_4, Sub_mesh_3, Sub_mesh_2, Sub_mesh_1, Sub_mesh_5, Sub_mesh_6 ] ])
NETGEN_1D_2D_3D_3 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=ROI1)
Sub_mesh_7 = NETGEN_1D_2D_3D_3.GetSubMesh()
NETGEN_3D_Parameters_4 = NETGEN_1D_2D_3D_3.Parameters()
NETGEN_3D_Parameters_4.SetMaxSize( 25.4615 )
NETGEN_3D_Parameters_4.SetSecondOrder( 0 )
NETGEN_3D_Parameters_4.SetOptimize( 1 )
NETGEN_3D_Parameters_4.SetFineness( 2 )
NETGEN_3D_Parameters_4.SetMinSize( 0.00328242 )
NETGEN_3D_Parameters_4.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_4.SetFuseEdges( 1 )
NETGEN_3D_Parameters_4.SetQuadAllowed( 0 )
isDone = Mesh_1.SetMeshOrder( [ [ Sub_mesh_4, Sub_mesh_3, Sub_mesh_2, Sub_mesh_1, Sub_mesh_5, Sub_mesh_6, Sub_mesh_7 ] ])
NETGEN_1D_2D_3D_4 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=Rest_1)
Sub_mesh_8 = NETGEN_1D_2D_3D_4.GetSubMesh()
NETGEN_3D_Parameters_5 = NETGEN_1D_2D_3D_4.Parameters()
NETGEN_3D_Parameters_5.SetMaxSize( 2.5 )
NETGEN_3D_Parameters_5.SetSecondOrder( 0 )
NETGEN_3D_Parameters_5.SetOptimize( 1 )
NETGEN_3D_Parameters_5.SetFineness( 2 )
NETGEN_3D_Parameters_5.SetMinSize( 0.000374134 )
NETGEN_3D_Parameters_5.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_5.SetFuseEdges( 1 )
NETGEN_3D_Parameters_5.SetQuadAllowed( 0 )
NETGEN_1D_2D_3D_5 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=Auto_group_for_floating)
Sub_mesh_9 = NETGEN_1D_2D_3D_5.GetSubMesh()
NETGEN_3D_Parameters_6 = NETGEN_1D_2D_3D_5.Parameters()
NETGEN_3D_Parameters_6.SetMaxSize( 25.4615 )
NETGEN_3D_Parameters_6.SetSecondOrder( 0 )
NETGEN_3D_Parameters_6.SetOptimize( 1 )
NETGEN_3D_Parameters_6.SetFineness( 2 )
NETGEN_3D_Parameters_6.SetMinSize( 0.000374134 )
NETGEN_3D_Parameters_6.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_6.SetFuseEdges( 1 )
NETGEN_3D_Parameters_6.SetQuadAllowed( 0 )
isDone = Mesh_1.SetMeshOrder( [ [ Sub_mesh_4, Sub_mesh_3, Sub_mesh_2, Sub_mesh_1, Sub_mesh_5,Sub_mesh_9,Sub_mesh_6, Sub_mesh_7, Sub_mesh_8 ] ])
#if Phi_vector[0]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_1 )
#if Phi_vector[1]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_2 )
#if Phi_vector[2]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_3 )
#if Phi_vector[3]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_4 )
isDone = Mesh_1.Compute()
if Phi_vector[0]!=None:
Mesh_1.GroupOnGeom(Contact1_1,'C1_1',SMESH.FACE)
if Phi_vector[1]!=None:
Mesh_1.GroupOnGeom(Contact1_2,'C1_2',SMESH.FACE)
if Phi_vector[2]!=None:
Mesh_1.GroupOnGeom(Contact1_3,'C1_3',SMESH.FACE)
if Phi_vector[3]!=None:
Mesh_1.GroupOnGeom(Contact1_4,'C1_4',SMESH.FACE)
Encap_contact = Mesh_1.GroupOnGeom(encap_inner_ROI1,'Encap_contact',SMESH.VOLUME)
Encap_rest = Mesh_1.GroupOnGeom(encap_outer_ROI1,'Encap_rest',SMESH.VOLUME)
RegOfInt = Mesh_1.GroupOnGeom(ROI1,'RegOfInt',SMESH.VOLUME)
Rst = Mesh_1.GroupOnGeom(Rest_1,'Rst',SMESH.VOLUME)
Flt_cnt=Mesh_1.GroupOnGeom(Auto_group_for_floating,'Flt_cnt',SMESH.VOLUME)
## Set names of Mesh objects
smesh.SetName(NETGEN_1D_2D_3D.GetAlgorithm(), 'NETGEN 1D-2D-3D')
smesh.SetName(NETGEN_1D_2D.GetAlgorithm(), 'NETGEN 1D-2D')
smesh.SetName(NETGEN_2D_Parameters_1, 'NETGEN 2D Parameters_1')
smesh.SetName(NETGEN_3D_Parameters_2, 'NETGEN 3D Parameters_2')
smesh.SetName(NETGEN_3D_Parameters_1, 'NETGEN 3D Parameters_1')
smesh.SetName(NETGEN_3D_Parameters_5, 'NETGEN 3D Parameters_5')
smesh.SetName(NETGEN_3D_Parameters_6, 'NETGEN 3D Parameters_6')
smesh.SetName(NETGEN_3D_Parameters_3, 'NETGEN 3D Parameters_3')
smesh.SetName(NETGEN_3D_Parameters_4, 'NETGEN 3D Parameters_4')
smesh.SetName(Sub_mesh_4, 'Sub-mesh_4')
smesh.SetName(Sub_mesh_1, 'Sub-mesh_1')
smesh.SetName(Sub_mesh_3, 'Sub-mesh_3')
smesh.SetName(Sub_mesh_2, 'Sub-mesh_2')
smesh.SetName(Mesh_1.GetMesh(), 'Mesh_1')
smesh.SetName(Rst, 'Rst')
smesh.SetName(Flt_cnt, 'Flt_cnt')
smesh.SetName(RegOfInt, 'RegOfInt')
smesh.SetName(Encap_rest, 'Encap_rest')
smesh.SetName(Encap_contact, 'Encap_contact')
smesh.SetName(Sub_mesh_7, 'Sub-mesh_7')
smesh.SetName(Sub_mesh_6, 'Sub-mesh_6')
smesh.SetName(Sub_mesh_5, 'Sub-mesh_5')
smesh.SetName(Sub_mesh_8, 'Sub-mesh_8')
smesh.SetName(Sub_mesh_9, 'Sub-mesh_9')
#if Phi_vector[0]!=None:
#
# smesh.SetName(C1_1, 'C1_1')
#if Phi_vector[1]!=None:
#
# smesh.SetName(C1_2, 'C1_2')
#if Phi_vector[2]!=None:
#
# smesh.SetName(C1_3, 'C1_3')
#if Phi_vector[3]!=None:
#
# smesh.SetName(C1_4, 'C1_4')
Mesh_1.ExportMED(os.environ['PATIENTDIR']+'/Meshes/Mesh_unref.med')
#if salome.sg.hasDesktop():
# salome.sg.updateObjBrowser(True)
import killSalome
killSalome.killAllPorts()
| gpl-3.0 |
loli/sklearn-ensembletrees | sklearn/datasets/species_distributions.py | 10 | 7844 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
except ImportError:
# Python 3
from urllib.request import urlopen
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6,
dtype=np.int16):
"""
load a coverage file.
This will return a numpy array of the given dtype
"""
try:
header = [F.readline() for i in range(header_length)]
except:
F = open(F)
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
    nodata = header['NODATA_value']
    if nodata != -9999:
        # normalize the file's NODATA marker to the standard -9999 sentinel
        M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : string or file object
file object or name of file
Returns
-------
rec : np.ndarray
record array representing the data
"""
try:
names = F.readline().strip().split(',')
except:
F = open(F)
names = F.readline().strip().split(',')
rec = np.loadtxt(F, skiprows=1, delimiter=',',
dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Notes
------
This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in
      Colombia, Ecuador, Peru, and Venezuela.
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
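    # Coverage grids are stored as int16 to keep the memmapped archive small;
    # missing cells are marked with the sentinel value -9999.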
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages,
dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
| bsd-3-clause |
kudkudak/r2-learner | scripts/fit_r2.py | 2 | 1992 | #!/usr/bin/env python
import sys, os, time, traceback
from sklearn.grid_search import ParameterGrid
from multiprocessing import Pool
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from misc.experiment_utils import save_exp, get_exp_logger, shorten_params, exp_done
from r2 import *
from misc.data_api import *
from fit_models import *
from elm import ELM
datasets = fetch_all_datasets()
n_jobs = 2
r2svm_params = {'beta': [0.1, 0.5, 1.0, 1.5, 2.0],
'fit_c': ['random', None],
'scale': [True, False],
'recurrent': [True, False],
'use_prev': [True, False],
'seed': [666]}
r2elm_params = {'h': [i for i in xrange(20,101,20)],
'beta': [0.1, 0.5, 1.0, 1.5, 2.0],
'fit_c': ['random', None],
'scale': [True, False],
'recurrent': [True, False],
'use_prev': [True, False],
'seed': [666]}
exp_params = [ {'model': R2SVMLearner, 'params': r2svm_params, 'exp_name': 'test', 'model_name': 'r2svm'},
{'model': R2ELMLearner, 'params': r2elm_params, 'exp_name': 'test', 'model_name': 'r2elm'}]
def gen_params():
for data in datasets:
for r in exp_params:
param_list = ParameterGrid(r['params'])
for param in param_list:
yield {'model': r['model'], 'params': param, 'data': data,
'name': r['exp_name'], 'model_name': r['model_name']}
params = list(gen_params())
def run(p):
try:
k_fold(base_model=p['model'], params=p['params'], data=p['data'], exp_name=p['name'],
model_name=p['model_name'], all_layers=True)
except:
print p['model']
print traceback.format_exc()
pool = Pool(n_jobs)
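# chunksize of 1 so each worker picks up a single (model, params, dataset) job at a time;
# the loop below polls the async result and reports how many tasks are still pending.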
rs = pool.map_async(run, params, 1)
while True:
if rs.ready():
break
remaining = rs._number_left
print "Waiting for", remaining, "tasks to complete"
time.sleep(3)
| mit |
depet/scikit-learn | examples/covariance/plot_covariance_estimation.py | 8 | 5036 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
  the asymptotically optimal regularization parameter (minimizing an MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is computational not costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly most
computationally costly.
"""
print(__doc__)
import numpy as np
import pylab as pl
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
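# Note: ShrunkCovariance regularizes the empirical covariance S towards a scaled identity,
# (1 - shrinkage) * S + shrinkage * (trace(S) / n_features) * Id, so the shrinkage
# coefficient below directly sets the bias-variance trade-off discussed above.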
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = pl.figure()
pl.title("Regularized covariance: likelihood and shrinkage coefficient")
pl.xlabel('Regularization parameter: shrinkage coefficient')
pl.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
pl.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
pl.plot(pl.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((pl.ylim()[1] - pl.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
pl.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
pl.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
pl.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
pl.ylim(ymin, ymax)
pl.xlim(xmin, xmax)
pl.legend()
pl.show()
| bsd-3-clause |
ephes/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
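# The OOB-based choice of n_estimators is the iteration at which this cumulative sum is
# minimal, i.e. where adding further trees stops reducing the estimated held-out deviance.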
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
Adai0808/scikit-learn | benchmarks/bench_mnist.py | 154 | 6006 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogenous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100))
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
tamasgal/km3pipe | examples/plot_pmt_directions.py | 1 | 1392 | # -*- coding: utf-8 -*-
"""
==================
PMT Directions
==================
Plot the PMT directions for a given DOM.
"""
# Author: Tamas Gal <[email protected]>
# License: BSD-3
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from km3net_testdata import data_path
import km3pipe as kp
import km3pipe.style
km3pipe.style.use("km3pipe")
km3pipe.style.use("talk")
detx = data_path(
"detx/orca_115strings_av23min20mhorizontal_18OMs_alt9mvertical_v1.detx"
)
det = kp.hardware.Detector(detx)
#####################################################
# Alternatively, you can use the `det_id` to retrieve the geometry from the DB.
# det = kp.hardware.Detector(det_id=29)
#####################################################
# Let's take the first DOM ID
dom_id = det.dom_ids[0]
#####################################################
# ...and get the table of the PMTs in the chosen DOM:
pmts = det.pmts[det.pmts.dom_id == dom_id]
#####################################################
# The `quiver` function can directly plot the PMT data, since those are
# stored as numpy arrays.
fig = plt.figure()
ax = fig.gca(projection="3d")
ax.quiver(
pmts.pos_x, pmts.pos_y, pmts.pos_z, pmts.dir_x, pmts.dir_y, pmts.dir_z, length=0.1
)
ax.set_xlabel("x [m]")
ax.set_ylabel("y [m]")
ax.set_zlabel("z [m]")
ax.set_title("PMT directions on DOM {}".format(dom_id))
| mit |
BorisJeremic/Real-ESSI-Examples | education_examples/_Chapter_Material_Behaviour_Examples/Multi_Yield_Surface_DruckerPrager_GGmax/plot_stress_strain.py | 1 | 1669 | import numpy as np
import matplotlib.pyplot as plt
# target
userInput1= [0,1E-6,1E-5,5E-5,1E-4, 0.0005, 0.001, 0.005, 0.01];
userInput2= [1,0.99563892,0.96674888,0.87318337,0.78735192,0.46719464,0.32043423,0.10940113,0.06347752];
Gmax = 3E8;
poisson = 0.0;
# #################################
gamma = userInput1
GGmax = userInput2
G=[Gmax * item for item in GGmax]
tau = np.zeros(len(gamma))
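# backbone curve: shear stress tau = G(gamma) * gamma, using the
# strain-dependent secant modulus G = GGmax * Gmax computed above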
for it in xrange(1,len(gamma)):
tau[it] = gamma[it] * G[it]
print tau
epsilon = [item/2. for item in gamma]
# #################################
# Plot stress-strain
# #################################
strain = np.loadtxt("strain.feioutput")
stress = np.loadtxt("stress.feioutput")
essiGamma = [2*item for item in strain]
plt.plot(gamma, tau, 'b-', label='Input' )
plt.plot(essiGamma, stress, 'r-', label=' ESSI')
plt.legend(loc=2)
plt.xlabel('Strain / (unitless)')
plt.ylabel('Stress / (Pa)')
plt.title('Multi-Yield-Surface Drucker-Prager: Stress-Strain')
plt.grid()
plt.box()
plt.savefig('backbone.pdf', transparent=True, bbox_inches='tight')
plt.show()
# #################################
# Plot GGmax
# #################################
# # avoid the divide by zero
# stress[0]=stress[1]
# strain[0]=strain[1]
# essiG = [a/b/2. for a,b in zip(stress, strain)]
# essiGGmax = [item/Gmax for item in essiG]
# plt.semilogx(essiGamma, essiGGmax, label='ESSI')
# plt.semilogx(gamma , GGmax , label='Input')
# plt.legend(loc=3)
# plt.title('Multi-Yield-Surface Drucker-Prager G/Gmax')
# plt.xlabel('Strain / (unitless)')
# plt.ylabel('G/Gmax / (unitless)')
# plt.grid()
# plt.box()
# plt.savefig('GGmax.pdf', transparent=True, bbox_inches='tight')
# plt.show()
| cc0-1.0 |
rmcgibbo/scipy | scipy/stats/tests/test_morestats.py | 2 | 44608 | # Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (TestCase, run_module_suite, assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_raises, assert_, assert_allclose, assert_equal, dec, assert_warns)
from scipy import stats
from common_tests import check_named_results
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestBayes_mvs(TestCase):
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
class TestShapiro(TestCase):
def test_basic(self):
x1 = [0.11,7.87,4.61,10.14,7.95,3.14,0.46,
4.43,0.21,4.75,0.71,1.52,3.24,
0.93,0.42,4.97,9.53,4.55,0.47,6.66]
w,pw = stats.shapiro(x1)
assert_almost_equal(w,0.90047299861907959,6)
assert_almost_equal(pw,0.042089745402336121,6)
x2 = [1.36,1.14,2.92,2.55,1.46,1.06,5.27,-1.11,
3.48,1.10,0.88,-0.51,1.46,0.52,6.20,1.69,
0.08,3.67,2.81,3.49]
w,pw = stats.shapiro(x2)
assert_almost_equal(w,0.9590270,6)
assert_almost_equal(pw,0.52460,3)
def test_bad_arg(self):
# Length of x is less than 3.
x = [1]
assert_raises(ValueError, stats.shapiro, x)
class TestAnderson(TestCase):
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A,crit,sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A,crit,sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A,crit,sig = stats.anderson(x1,'expon')
assert_array_less(A, crit[-2:])
olderr = np.seterr(all='ignore')
try:
A,crit,sig = stats.anderson(x2,'expon')
finally:
np.seterr(**olderr)
assert_(A > crit[-1])
def test_bad_arg(self):
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
def test_result_attributes(self):
rs = RandomState(1234567890)
x = rs.standard_exponential(size=50)
res = stats.anderson(x)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAndersonKSamp(TestCase):
def test_example1a(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
assert_warns(UserWarning, stats.anderson_ksamp, (t1, t2, t3, t4),
midrank=False)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
assert_almost_equal(Tk, 4.449, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0021, 4)
def test_example1b(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass arrays
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
assert_almost_equal(Tk, 4.480, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0020, 4)
def test_example2a(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
# Pass lists instead of arrays
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=False)
assert_almost_equal(Tk, 3.288, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_example2b(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=True)
assert_almost_equal(Tk, 3.294, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_not_enough_samples(self):
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
def test_no_distinct_observations(self):
assert_raises(ValueError, stats.anderson_ksamp,
(np.ones(5), np.ones(5)))
def test_empty_sample(self):
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
def test_result_attributes(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
res = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAnsari(TestCase):
def test_small(self):
x = [1,2,3,3,4]
y = [3,2,6,1,6,1,4,1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
W, pval = stats.ansari(x,y)
assert_almost_equal(W,23.5,11)
assert_almost_equal(pval,0.13499256881897437,11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99))
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message="Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W,185.5,11)
assert_almost_equal(pval,0.18145819972867083,11)
def test_exact(self):
W,pval = stats.ansari([1,2,3,4],[15,5,20,8,10,12])
assert_almost_equal(W,10.0,11)
assert_almost_equal(pval,0.533333333333333333,7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBartlett(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T,20.78587342806484,7)
assert_almost_equal(pval,0.0136358632781,7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.bartlett(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_empty_arg(self):
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
assert_equal((np.nan, np.nan), stats.bartlett(*args))
class TestLevene(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W,1.7059176930008939,7)
assert_almost_equal(pval,0.0990829755522,7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed', proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed', proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed', proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1,1,21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1,1,21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1,1,21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBinomP(TestCase):
def test_data(self):
pval = stats.binom_test(100,250)
assert_almost_equal(pval,0.0018833009350757682,11)
pval = stats.binom_test(201,405)
assert_almost_equal(pval,0.92085205962670713,11)
pval = stats.binom_test([682,243],p=3.0/4)
assert_almost_equal(pval,0.38249155957481695,11)
def test_bad_len_x(self):
# Length of x must be 1 or 2.
assert_raises(ValueError, stats.binom_test, [1,2,3])
def test_bad_n(self):
# len(x) is 1, but n is invalid.
# Missing n
assert_raises(ValueError, stats.binom_test, [100])
# n less than x[0]
assert_raises(ValueError, stats.binom_test, [100], n=50)
def test_bad_p(self):
assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0)
def test_alternatives(self):
res = stats.binom_test(51, 235, p=1./6, alternative='less')
assert_almost_equal(res, 0.982022657605858)
res = stats.binom_test(51, 235, p=1./6, alternative='greater')
assert_almost_equal(res, 0.02654424571169085)
res = stats.binom_test(51, 235, p=1./6, alternative='two-sided')
assert_almost_equal(res, 0.0437479701823997)
class TestFligner(TestCase):
def test_data(self):
# numbers from R: fligner.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.fligner(x1,x1**2),
(3.2282229927203536, 0.072379187848207877), 11)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean')
Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed', proportiontocut=0.0)
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
# Use center='trimmed'
Xsq1, pval1 = stats.fligner(x, y, center='trimmed', proportiontocut=0.125)
# Trim the data here, and use center='mean'
Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
# The following test looks reasonable at first, but fligner() uses the
# function stats.rankdata(), and in one of the cases in this test,
# there are ties, while in the other (because of normal rounding
# errors) there are not. This difference leads to differences in the
# third significant digit of W.
#
#def test_equal_mean_median(self):
# x = np.linspace(-1,1,21)
# y = x**3
# W1, pval1 = stats.fligner(x, y, center='mean')
# W2, pval2 = stats.fligner(x, y, center='median')
# assert_almost_equal(W1, W2)
# assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1,1,21)
assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1,1,21)
assert_raises(ValueError, stats.fligner, x, x, center='trim')
def test_bad_num_args(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.fligner, [1])
def test_empty_arg(self):
x = np.arange(5)
assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
class TestMood(TestCase):
def test_mood(self):
# numbers from R: mood.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.mood(x1, x1**2),
(-1.3830857299399906, 0.16663858066771478), 11)
def test_mood_order_of_args(self):
# z should change sign when the order of arguments changes, pvalue
# should not change
np.random.seed(1234)
x1 = np.random.randn(10, 1)
x2 = np.random.randn(15, 1)
z1, p1 = stats.mood(x1, x2)
z2, p2 = stats.mood(x2, x1)
assert_array_almost_equal([z1, p1], [-z2, p2])
def test_mood_with_axis_none(self):
#Test with axis = None, compare with results from R
x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
1.59528080213779, 0.329507771815361, -0.820468384118015,
0.487429052428485, 0.738324705129217, 0.575781351653492,
-0.305388387156356, 1.51178116845085, 0.389843236411431,
-0.621240580541804, -2.2146998871775, 1.12493091814311,
-0.0449336090152309, -0.0161902630989461, 0.943836210685299,
0.821221195098089, 0.593901321217509]
x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
-1.13037567424629, -0.0802517565509893, 0.132420284381094,
0.707954729271733, -0.23969802417184, 1.98447393665293,
-0.138787012119665, 0.417650750792556, 0.981752777463662,
-0.392695355503813, -1.03966897694891, 1.78222896030858,
-2.31106908460517, 0.878604580921265, 0.035806718015226,
1.01282869212708, 0.432265154539617, 2.09081920524915,
-1.19992581964387, 1.58963820029007, 1.95465164222325,
0.00493777682814261, -2.45170638784613, 0.477237302613617,
-0.596558168631403, 0.792203270299649, 0.289636710177348]
x1 = np.array(x1)
x2 = np.array(x2)
x1.shape = (10, 2)
x2.shape = (15, 2)
assert_array_almost_equal(stats.mood(x1, x2, axis=None),
[-1.31716607555, 0.18778296257])
def test_mood_2d(self):
# Test if the results of mood test in 2-D case are consistent with the
# R result for the same inputs. Numbers from R mood.test().
ny = 5
np.random.seed(1234)
x1 = np.random.randn(10, ny)
x2 = np.random.randn(15, ny)
z_vectest, pval_vectest = stats.mood(x1, x2)
for j in range(ny):
assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
stats.mood(x1[:, j], x2[:, j]))
# inverse order of dimensions
x1 = x1.transpose()
x2 = x2.transpose()
z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
for i in range(ny):
# check axis handling is self consistent
assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
stats.mood(x1[i, :], x2[i, :]))
def test_mood_3d(self):
shape = (10, 5, 6)
np.random.seed(1234)
x1 = np.random.randn(*shape)
x2 = np.random.randn(*shape)
for axis in range(3):
z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
# Tests that result for 3-D arrays is equal to that for the
# same calculation on a set of 1-D arrays taken from the
# 3-D array
axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis
for i in range(shape[axes_idx[axis][0]]):
for j in range(shape[axes_idx[axis][1]]):
if axis == 0:
slice1 = x1[:, i, j]
slice2 = x2[:, i, j]
elif axis == 1:
slice1 = x1[i, :, j]
slice2 = x2[i, :, j]
else:
slice1 = x1[i, j, :]
slice2 = x2[i, j, :]
assert_array_almost_equal([z_vectest[i, j],
pval_vectest[i, j]],
stats.mood(slice1, slice2))
def test_mood_bad_arg(self):
# Raise ValueError when the sum of the lengths of the args is less than 3
assert_raises(ValueError, stats.mood, [1], [])
class TestProbplot(TestCase):
def test_basic(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm, osr = stats.probplot(x, fit=False)
osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
-0.73908135, -0.5857176, -0.44506467, -0.31273668,
-0.18568928, -0.06158146, 0.06158146, 0.18568928,
0.31273668, 0.44506467, 0.5857176, 0.73908135,
0.91222575, 1.11829229, 1.38768012, 1.8241636]
assert_allclose(osr, np.sort(x))
assert_allclose(osm, osm_expected)
res, res_fit = stats.probplot(x, fit=True)
res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
assert_allclose(res_fit, res_fit_expected)
def test_sparams_keyword(self):
np.random.seed(123456)
x = stats.norm.rvs(size=100)
# Check that None, () and 0 (loc=0, for normal distribution) all work
# and give the same results
osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osm1, osm3)
assert_allclose(osr1, osr2)
assert_allclose(osr1, osr3)
# Check giving (loc, scale) params for normal distribution
osm, osr = stats.probplot(x, sparams=(), fit=False)
def test_dist_keyword(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
assert_raises(AttributeError, stats.probplot, x, dist=[])
class custom_dist(object):
"""Some class that looks just enough like a distribution."""
def ppf(self, q):
return stats.norm.ppf(q, loc=2)
osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
np.random.seed(7654321)
fig = plt.figure()
fig.add_subplot(111)
x = stats.t.rvs(3, size=100)
res1, fitres1 = stats.probplot(x, plot=plt)
plt.close()
res2, fitres2 = stats.probplot(x, plot=None)
res3 = stats.probplot(x, fit=False, plot=plt)
plt.close()
res4 = stats.probplot(x, fit=False, plot=None)
# Check that results are consistent between combinations of `fit` and
# `plot` keywords.
assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
assert_allclose(res1, res2)
assert_allclose(res1, res3)
assert_allclose(res1, res4)
assert_allclose(fitres1, fitres2)
# Check that a Matplotlib Axes object is accepted
fig = plt.figure()
ax = fig.add_subplot(111)
stats.probplot(x, fit=False, plot=ax)
plt.close()
def test_probplot_bad_args(self):
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
def test_empty(self):
assert_equal(stats.probplot([], fit=False),
(np.array([]), np.array([])))
assert_equal(stats.probplot([], fit=True),
((np.array([]), np.array([])),
(np.nan, np.nan, 0.0)))
def test_array_of_size_one(self):
with np.errstate(invalid='ignore'):
assert_equal(stats.probplot([1], fit=True),
((np.array([0.]), np.array([1])),
(np.nan, np.nan, 0.0)))
def test_wilcoxon_bad_arg():
# Raise ValueError when two args of different lengths are given or
# zero_method is unknown.
assert_raises(ValueError, stats.wilcoxon, [1], [1,2])
assert_raises(ValueError, stats.wilcoxon, [1,2], [1,2], "dummy")
def test_mvsdist_bad_arg():
# Raise ValueError if fewer than two data points are given.
data = [1]
assert_raises(ValueError, stats.mvsdist, data)
class TestKstat(TestCase):
# Note: `kstat` still needs review. Statistics Review issue gh-675.
def test_moments_normal_distribution(self):
np.random.seed(32149)
data = np.random.randn(12345)
moments = []
for n in [1, 2, 3, 4]:
moments.append(stats.kstat(data, n))
expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
assert_allclose(moments, expected, rtol=1e-4)
def test_kstat_bad_arg(self):
# Raise ValueError if n > 4 or n < 1.
data = np.arange(10)
for n in [0, 4.001]:
assert_raises(ValueError, stats.kstat, data, n=n)
def test_kstatvar_bad_arg():
# Raise ValueError if n is not 1 or 2.
data = [1]
n = 10
assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182, 0.93519298]
assert_allclose(svals, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
def test_dist(self):
# Test that we can specify distributions both by name and as objects.
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10, dist=stats.tukeylambda)
assert_allclose(svals1, svals2, rtol=1e-20)
assert_allclose(ppcc1, ppcc2, rtol=1e-20)
# Test that 'tukeylambda' is the default dist
svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
assert_allclose(svals1, svals3, rtol=1e-20)
assert_allclose(ppcc1, ppcc3, rtol=1e-20)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=plt)
plt.close()
# Check that a Matplotlib Axes object is accepted
fig.add_subplot(111)
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `b` has to be larger than `a`
assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
dist="plate_of_shrimp")
def test_empty(self):
# For consistency with probplot return for one empty array,
# ppcc contains all zeros and svals is the same as for normal array
# input.
svals, ppcc = stats.ppcc_plot([], 0, 1)
assert_allclose(svals, np.linspace(0, 1, num=80))
assert_allclose(ppcc, np.zeros(80, dtype=float))
def test_ppcc_max_bad_arg():
# Raise ValueError when given an invalid distribution.
data = [1]
assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")
class TestBoxcox_llf(TestCase):
def test_basic(self):
np.random.seed(54321)
x = stats.norm.rvs(size=10000, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
assert_allclose(llf, llf_expected)
def test_array_like(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that. boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.boxcox_llf(1, [])))
class TestBoxcox(TestCase):
def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x)
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
class TestBoxcoxNormmax(TestCase):
def setUp(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
class TestBoxcoxNormplot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
0.95843297]
assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=plt)
plt.close()
# Check that a Matplotlib Axes object is accepted
fig.add_subplot(111)
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `lb` has to be larger than `la`
assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
# `x` can not contain negative values
assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)
def test_empty(self):
assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestCircFuncs(TestCase):
def test_circfuncs(self):
x = np.array([355,5,2,359,10,350])
M = stats.circmean(x, high=360)
Mval = 0.167690146
assert_allclose(M, Mval, rtol=1e-7)
V = stats.circvar(x, high=360)
Vval = 42.51955609
assert_allclose(V, Vval, rtol=1e-7)
S = stats.circstd(x, high=360)
Sval = 6.520702116
assert_allclose(S, Sval, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20,21,22,18,19,20.5,19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = x.var()
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
def test_circmean_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
def test_circfuncs_array_like(self):
x = [355,5,2,359,10,350]
assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
def test_empty(self):
assert_(np.isnan(stats.circmean([])))
assert_(np.isnan(stats.circstd([])))
assert_(np.isnan(stats.circvar([])))
def test_accuracy_wilcoxon():
freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
nums = range(-4, 5)
x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
y = np.zeros(x.size)
T, p = stats.wilcoxon(x, y, "pratt")
assert_allclose(T, 423)
assert_allclose(p, 0.00197547303533107)
T, p = stats.wilcoxon(x, y, "zsplit")
assert_allclose(T, 441)
assert_allclose(p, 0.0032145343172473055)
T, p = stats.wilcoxon(x, y, "wilcox")
assert_allclose(T, 327)
assert_allclose(p, 0.00641346115861)
# Test the 'correction' option, using values computed in R with:
# > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
T, p = stats.wilcoxon(x, y, correction=False)
assert_equal(T, 34)
assert_allclose(p, 0.6948866, rtol=1e-6)
T, p = stats.wilcoxon(x, y, correction=True)
assert_equal(T, 34)
assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_result_attributes():
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
res = stats.wilcoxon(x, y, correction=False)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_wilcoxon_tie():
# Regression test for gh-2391.
# Corresponding R code is:
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
# > result$p.value
# [1] 0.001565402
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
# > result$p.value
# [1] 0.001904195
stat, p = stats.wilcoxon([0.1] * 10)
expected_p = 0.001565402
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
stat, p = stats.wilcoxon([0.1] * 10, correction=True)
expected_p = 0.001904195
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
class TestMedianTest(TestCase):
def test_bad_n_samples(self):
# median_test requires at least two samples.
assert_raises(ValueError, stats.median_test, [1, 2, 3])
def test_empty_sample(self):
# Each sample must contain at least one value.
assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
def test_empty_when_ties_ignored(self):
# The grand median is 1, and all values in the first argument are
# equal to the grand median. With ties="ignore", those values are
# ignored, which results in the first sample being (in effect) empty.
# This should raise a ValueError.
assert_raises(ValueError, stats.median_test,
[1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
def test_empty_contingency_row(self):
# The grand median is 1, and with the default ties="below", all the
# values in the samples are counted as being below the grand median.
# This would result in a row of zeros in the contingency table, which is
# an error.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
# With ties="above", all the values are counted as above the
# grand median.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
ties="above")
def test_bad_ties(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], ties="foo")
def test_bad_keyword(self):
assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5], foo="foo")
def test_simple(self):
x = [1, 2, 3]
y = [1, 2, 3]
stat, p, med, tbl = stats.median_test(x, y)
# The median is floating point, but this equality test should be safe.
assert_equal(med, 2.0)
assert_array_equal(tbl, [[1, 1], [2, 2]])
# The expected values of the contingency table equal the contingency table,
# so the statistic should be 0 and the p-value should be 1.
assert_equal(stat, 0)
assert_equal(p, 1)
def test_ties_options(self):
# Test the contingency table calculation.
x = [1, 2, 3, 4]
y = [5, 6]
z = [7, 8, 9]
# grand median is 5.
# Default 'ties' option is "below".
stat, p, m, tbl = stats.median_test(x, y, z)
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
assert_equal(m, 5)
assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
def test_basic(self):
# median_test calls chi2_contingency to compute the test statistic
# and p-value. Make sure it hasn't screwed up the call...
x = [1, 2, 3, 4, 5]
y = [2, 4, 6, 8]
stat, p, m, tbl = stats.median_test(x, y)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, correction=False)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
rpereira-dev/ENSIIE | UE/L1/planet1.py | 1 | 2366 | # import dependencies (cos, sin, arctan and sqrt)
import math
import matplotlib.pyplot as plt
import numpy as np
# the animation was inspired by:
# https://stackoverflow.com/questions/10896054/simple-animation-of-2d-coordinates-using-matplotlib-and-pyplot
# integration time step (in seconds)
dt = 0.01
# animation frame rate (FPS)
FPS = 60
# physical constants
G = 6.674 * 10e-11
Ms = 1.989 * 10e30
# list of planets, format: [name, mass, x, y, vx, vy, ax, ay]
# (at time t0)
planetes = [
# initial conditions of the Earth:
# angle relative to the Sun is 0
# the Earth's orbital speed is about 30 km.s-1
# its distance to the Sun is about 149.6 billion m
["Terre", 5.972*10e24, 1.496*10e11, 0.0, 0.0, 30.0*10e3, 0.0, 0.0],
["Mars", 6.419*10e23, 2.066*10e11, 0.0, 0.0, 40.0*10e3, 0.0, 0.0]
]
# constants giving the indices into the planet arrays
NOM = 0
MASSE = 1
X = 2
Y = 3
VX = 4
VY = 5
AX = 6
AY = 7
# pyplot initialization
(fig, ax) = plt.subplots()
xs = [planete[X] for planete in planetes]
ys = [planete[Y] for planete in planetes]
points, = ax.plot(xs, ys, marker='o', linestyle='None')
ax.set_xlim(-1.0, 1.0)
ax.set_ylim(-1.0, 1.0)
# integrate forever
while True:
# integrate N steps before redrawing
N = 100000
# (each frame therefore advances the simulation by N * dt seconds)
for i in xrange(0, N):
for planete in planetes:
# polar coordinates
d = math.sqrt(planete[X]**2 + planete[Y]**2)
theta = math.atan2(planete[Y], planete[X])
# update the acceleration from the differential equation:
# M * A = F
# (the planet's mass M cancels out)
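# sketch of the step (assuming a fixed Sun at the origin):
#   m * a = -G * Ms * m / d^2, directed from the planet toward the Sun
#   a = -G * Ms / d^2, projected onto x and y with cos(theta) and sin(theta)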
F = -G * Ms / (d * d)
# project onto the two axes
planete[AX] = F * math.cos(theta)
planete[AY] = F * math.sin(theta)
# update the velocity (dv = a * dt)
planete[VX] = planete[VX] + dt * planete[AX]
planete[VY] = planete[VY] + dt * planete[AY]
# update the position
planete[X] = planete[X] + dt * planete[VX]
planete[Y] = planete[Y] + dt * planete[VY]
# draw the current state
# distances from the Sun
# position relative to the Sun, rescaled to [-1.0, 1.0]
MX = 4.0*10e11 # (see aphelion)
MY = 1.47*10e11 # (see perihelion)
xs = np.array([planete[X] / MX for planete in planetes])
ys = np.array([planete[Y] / MY for planete in planetes])
print(xs, ys)
# update the plot
points.set_data(xs, ys)
plt.pause(0.01)
| gpl-3.0 |
kernc/scikit-learn | sklearn/semi_supervised/label_propagation.py | 14 | 15965 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but running them can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supprots RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.graph import graph_laplacian
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
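# for the knn kernel, _get_kernel returns the indices of the nearest
# neighbours instead of a weight matrix, so each prediction is the sum of
# the neighbours' label distributions (normalized further below)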
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
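# main propagation loop: spread the current label distributions along the
# graph, re-apply the clamping weights and add back the static (clamped)
# part of the labels, until convergence or max_iter is reached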
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
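if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch: fit LabelSpreading on iris with roughly half the
    # labels hidden. Assumes scikit-learn's bundled iris dataset; the variable
    # names below are illustrative only.
    import numpy as np
    from sklearn import datasets

    iris = datasets.load_iris()
    rng = np.random.RandomState(42)
    y = np.copy(iris.target)
    y[rng.rand(len(y)) < 0.5] = -1  # -1 marks unlabeled samples
    model = LabelSpreading(kernel='rbf', gamma=20, alpha=0.2).fit(iris.data, y)
    print(model.transduction_[:10])  # inferred labels for the first samples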
| bsd-3-clause |
mattilyra/scikit-learn | examples/gaussian_process/plot_gpc_isoprobability.py | 5 | 3049 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Iso-probability lines for Gaussian Processes classification (GPC)
=================================================================
A two-dimensional classification example showing iso-probability lines for
the predicted probabilities.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Adapted to GaussianProcessClassifier:
# Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import DotProduct, ConstantKernel as C
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = np.array(g(X) > 0, dtype=int)
# Instantiate and fit Gaussian Process Model
kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2
gp = GaussianProcessClassifier(kernel=kernel)
gp.fit(X, y)
print("Learned kernel: %s " % gp.kernel_)
# Evaluate real function and the predicted probability
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_prob = gp.predict_proba(xx)[:, 1]
y_true = y_true.reshape((res, res))
y_prob = y_prob.reshape((res, res))
# Plot the probabilistic classification iso-values
fig = plt.figure(1)
ax = fig.gca()
ax.axes.set_aspect('equal')
plt.xticks([])
plt.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
cax = plt.imshow(y_prob, cmap=cm.gray_r, alpha=0.8,
extent=(-lim, lim, -lim, lim))
norm = plt.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = plt.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label(r'${\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\right]$')
plt.clim(0, 1)
plt.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
plt.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = plt.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = plt.contour(x1, x2, y_prob, [0.666], colors='b',
linestyles='solid')
plt.clabel(cs, fontsize=11)
cs = plt.contour(x1, x2, y_prob, [0.5], colors='k',
linestyles='dashed')
plt.clabel(cs, fontsize=11)
cs = plt.contour(x1, x2, y_prob, [0.334], colors='r',
linestyles='solid')
plt.clabel(cs, fontsize=11)
plt.show()
| bsd-3-clause |
MartialD/hyperspy | hyperspy/_signals/eds.py | 2 | 44775 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import itertools
import logging
import numpy as np
import warnings
import matplotlib
from matplotlib import pyplot as plt
from distutils.version import LooseVersion
from hyperspy import utils
from hyperspy.signal import BaseSignal
from hyperspy._signals.signal1d import Signal1D, LazySignal1D
from hyperspy.misc.elements import elements as elements_db
from hyperspy.misc.eds import utils as utils_eds
from hyperspy.misc.utils import isiterable
from hyperspy.utils.plot import markers
from hyperspy.docstrings.plot import (BASE_PLOT_DOCSTRING_PARAMETERS,
PLOT1D_DOCSTRING)
_logger = logging.getLogger(__name__)
class EDS_mixin:
_signal_type = "EDS"
def __init__(self, *args, **kwards):
super().__init__(*args, **kwards)
if self.metadata.Signal.signal_type == 'EDS':
warnings.warn('The microscope type is not set. Use '
'set_signal_type(\'EDS_TEM\') '
'or set_signal_type(\'EDS_SEM\')')
self.metadata.Signal.binned = True
self._xray_markers = {}
def _get_line_energy(self, Xray_line, FWHM_MnKa=None):
"""
Get the line energy and the energy resolution of a Xray line.
        The return values are in the same units as the signal axis
Parameters
----------
        Xray_line : string
            A valid element X-ray line, e.g. 'Fe_Kb'
FWHM_MnKa: {None, float, 'auto'}
The energy resolution of the detector in eV
            if 'auto', use the one in
'self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa'
Returns
-------
float: the line energy, if FWHM_MnKa is None
(float,float): the line energy and the energy resolution, if FWHM_MnKa
is not None
"""
units_name = self.axes_manager.signal_axes[0].units
if FWHM_MnKa == 'auto':
if self.metadata.Signal.signal_type == "EDS_SEM":
FWHM_MnKa = self.metadata.Acquisition_instrument.SEM.\
Detector.EDS.energy_resolution_MnKa
elif self.metadata.Signal.signal_type == "EDS_TEM":
FWHM_MnKa = self.metadata.Acquisition_instrument.TEM.\
Detector.EDS.energy_resolution_MnKa
else:
raise NotImplementedError(
"This method only works for EDS_TEM or EDS_SEM signals. "
"You can use `set_signal_type(\"EDS_TEM\")` or"
"`set_signal_type(\"EDS_SEM\")` to convert to one of these"
"signal types.")
line_energy = utils_eds._get_energy_xray_line(Xray_line)
if units_name == 'eV':
line_energy *= 1000
if FWHM_MnKa is not None:
line_FWHM = utils_eds.get_FWHM_at_Energy(
FWHM_MnKa, line_energy / 1000) * 1000
elif units_name == 'keV':
if FWHM_MnKa is not None:
line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa,
line_energy)
else:
raise ValueError(
"%s is not a valid units for the energy axis. "
"Only `eV` and `keV` are supported. "
"If `s` is the variable containing this EDS spectrum:\n "
">>> s.axes_manager.signal_axes[0].units = \'keV\' \n"
% units_name)
if FWHM_MnKa is None:
return line_energy
else:
return line_energy, line_FWHM
def _get_beam_energy(self):
"""
Get the beam energy.
        The return value is in the same units as the signal axis
"""
if "Acquisition_instrument.SEM.beam_energy" in self.metadata:
beam_energy = self.metadata.Acquisition_instrument.SEM.beam_energy
elif "Acquisition_instrument.TEM.beam_energy" in self.metadata:
beam_energy = self.metadata.Acquisition_instrument.TEM.beam_energy
else:
raise AttributeError(
"The beam energy is not defined in `metadata`. "
"Use `set_microscope_parameters` to set it.")
units_name = self.axes_manager.signal_axes[0].units
if units_name == 'eV':
beam_energy *= 1000
return beam_energy
def _get_xray_lines_in_spectral_range(self, xray_lines):
"""
Return the lines in the energy range
Parameters
----------
xray_lines: List of string
The xray_lines
Return
------
The list of xray_lines in the energy range
"""
ax = self.axes_manager.signal_axes[0]
low_value = ax.low_value
high_value = ax.high_value
try:
if self._get_beam_energy() < high_value:
high_value = self._get_beam_energy()
except AttributeError:
# in case the beam energy is not defined in the metadata
pass
xray_lines_in_range = []
xray_lines_not_in_range = []
for xray_line in xray_lines:
line_energy = self._get_line_energy(xray_line)
if low_value < line_energy < high_value:
xray_lines_in_range.append(xray_line)
else:
xray_lines_not_in_range.append(xray_line)
return xray_lines_in_range, xray_lines_not_in_range
def sum(self, axis=None, out=None):
if axis is None:
axis = self.axes_manager.navigation_axes
        # modify time spent per spectrum
s = super().sum(axis=axis, out=out)
s = out or s
mp = None
if s.metadata.get_item("Acquisition_instrument.SEM"):
mp = s.metadata.Acquisition_instrument.SEM
mp_old = self.metadata.Acquisition_instrument.SEM
elif s.metadata.get_item("Acquisition_instrument.TEM"):
mp = s.metadata.Acquisition_instrument.TEM
mp_old = self.metadata.Acquisition_instrument.TEM
if mp is not None and mp.has_item('Detector.EDS.live_time'):
mp.Detector.EDS.live_time = mp_old.Detector.EDS.live_time * \
self.data.size / s.data.size
if out is None:
return s
sum.__doc__ = Signal1D.sum.__doc__
def rebin(self, new_shape=None, scale=None, crop=True, out=None):
factors = self._validate_rebin_args_and_get_factors(
new_shape=new_shape,
scale=scale,)
m = super().rebin(new_shape=new_shape, scale=scale, crop=crop, out=out)
m = out or m
time_factor = np.prod([factors[axis.index_in_array]
for axis in m.axes_manager.navigation_axes])
aimd = m.metadata.Acquisition_instrument
if "Acquisition_instrument.SEM.Detector.EDS.real_time" in m.metadata:
aimd.SEM.Detector.EDS.real_time *= time_factor
elif "Acquisition_instrument.TEM.Detector.EDS.real_time" in m.metadata:
aimd.TEM.Detector.EDS.real_time *= time_factor
else:
_logger.info(
"real_time could not be found in the metadata and has not been updated.")
if "Acquisition_instrument.SEM.Detector.EDS.live_time" in m.metadata:
aimd.SEM.Detector.EDS.live_time *= time_factor
elif "Acquisition_instrument.TEM.Detector.EDS.live_time" in m.metadata:
aimd.TEM.Detector.EDS.live_time *= time_factor
else:
_logger.info(
"Live_time could not be found in the metadata and has not been updated.")
if out is None:
return m
else:
out.events.data_changed.trigger(obj=out)
return m
rebin.__doc__ = BaseSignal.rebin.__doc__
def set_elements(self, elements):
"""Erase all elements and set them.
Parameters
----------
elements : list of strings
A list of chemical element symbols.
See also
--------
add_elements, set_lines, add_lines
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> print(s.metadata.Sample.elements)
>>> s.set_elements(['Al'])
>>> print(s.metadata.Sample.elements)
['Al' 'C' 'Cu' 'Mn' 'Zr']
['Al']
"""
# Erase previous elements and X-ray lines
if "Sample.elements" in self.metadata:
del self.metadata.Sample.elements
self.add_elements(elements)
def add_elements(self, elements):
"""Add elements and the corresponding X-ray lines.
The list of elements is stored in `metadata.Sample.elements`
Parameters
----------
elements : list of strings
The symbol of the elements.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> print(s.metadata.Sample.elements)
>>> s.add_elements(['Ar'])
>>> print(s.metadata.Sample.elements)
['Al' 'C' 'Cu' 'Mn' 'Zr']
['Al', 'Ar', 'C', 'Cu', 'Mn', 'Zr']
See also
--------
set_elements, add_lines, set_lines
"""
if not isiterable(elements) or isinstance(elements, str):
raise ValueError(
"Input must be in the form of a list. For example, "
"if `s` is the variable containing this EDS spectrum:\n "
">>> s.add_elements(('C',))\n"
"See the docstring for more information.")
if "Sample.elements" in self.metadata:
elements_ = set(self.metadata.Sample.elements)
else:
elements_ = set()
for element in elements:
if element in elements_db:
elements_.add(element)
else:
raise ValueError(
"%s is not a valid chemical element symbol." % element)
self.metadata.set_item('Sample.elements', sorted(list(elements_)))
def _get_xray_lines(self, xray_lines=None, only_one=None,
only_lines=('a',)):
if xray_lines is None:
if 'Sample.xray_lines' in self.metadata:
xray_lines = self.metadata.Sample.xray_lines
elif 'Sample.elements' in self.metadata:
xray_lines = self._get_lines_from_elements(
self.metadata.Sample.elements,
only_one=only_one,
only_lines=only_lines)
else:
raise ValueError(
"Not X-ray line, set them with `add_elements`.")
return xray_lines
def set_lines(self,
lines,
only_one=True,
only_lines=('a',)):
        Erase all X-ray lines and set them.
See add_lines for details.
Parameters
----------
lines : list of strings
A list of valid element X-ray lines to add e.g. Fe_Kb.
Additionally, if `metadata.Sample.elements` is
            defined, add the lines of those elements that were not
given in this list.
only_one: bool
If False, add all the lines of each element in
            `metadata.Sample.elements` that have no line
defined in lines. If True (default),
only add the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, only the given lines will be added.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.add_lines()
>>> print(s.metadata.Sample.xray_lines)
>>> s.set_lines(['Cu_Ka'])
>>> print(s.metadata.Sample.xray_lines)
['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']
['Al_Ka', 'C_Ka', 'Cu_Ka', 'Mn_La', 'Zr_La']
See also
--------
add_lines, add_elements, set_elements
"""
only_lines = utils_eds._parse_only_lines(only_lines)
if "Sample.xray_lines" in self.metadata:
del self.metadata.Sample.xray_lines
self.add_lines(lines=lines,
only_one=only_one,
only_lines=only_lines)
def add_lines(self,
lines=(),
only_one=True,
only_lines=("a",)):
        Add X-ray lines to the internal list.
Although most functions do not require an internal list of
X-ray lines because they can be calculated from the internal
        list of elements, occasionally it might be useful to customize the
        X-ray lines to be used by all functions by default using this method.
The list of X-ray lines is stored in
`metadata.Sample.xray_lines`
Parameters
----------
lines : list of strings
A list of valid element X-ray lines to add e.g. Fe_Kb.
Additionally, if `metadata.Sample.elements` is
            defined, add the lines of those elements that were not
given in this list. If the list is empty (default), and
`metadata.Sample.elements` is
defined, add the lines of all those elements.
only_one: bool
If False, add all the lines of each element in
            `metadata.Sample.elements` that have no line
defined in lines. If True (default),
only add the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, only the given lines will be added.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.add_lines()
>>> print(s.metadata.Sample.xray_lines)
['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.set_microscope_parameters(beam_energy=30)
>>> s.add_lines()
>>> print(s.metadata.Sample.xray_lines)
['Al_Ka', 'C_Ka', 'Cu_Ka', 'Mn_Ka', 'Zr_La']
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.add_lines()
>>> print(s.metadata.Sample.xray_lines)
>>> s.add_lines(['Cu_Ka'])
>>> print(s.metadata.Sample.xray_lines)
['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']
['Al_Ka', 'C_Ka', 'Cu_Ka', 'Cu_La', 'Mn_La', 'Zr_La']
See also
--------
set_lines, add_elements, set_elements
"""
only_lines = utils_eds._parse_only_lines(only_lines)
if "Sample.xray_lines" in self.metadata:
xray_lines = set(self.metadata.Sample.xray_lines)
else:
xray_lines = set()
        # Record the elements whose X-ray lines have been customized
# So that we don't attempt to add new lines automatically
elements = set()
for line in xray_lines:
elements.add(line.split("_")[0])
for line in lines:
try:
element, subshell = line.split("_")
except ValueError:
raise ValueError(
"Invalid line symbol. "
"Please provide a valid line symbol e.g. Fe_Ka")
if element in elements_db:
elements.add(element)
if subshell in elements_db[element]['Atomic_properties'
]['Xray_lines']:
lines_len = len(xray_lines)
xray_lines.add(line)
if lines_len != len(xray_lines):
_logger.info("%s line added," % line)
else:
_logger.info("%s line already in." % line)
else:
raise ValueError(
"%s is not a valid line of %s." % (line, element))
else:
raise ValueError(
"%s is not a valid symbol of an element." % element)
xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines)[1]
for xray in xray_not_here:
warnings.warn("%s is not in the data energy range." % xray)
if "Sample.elements" in self.metadata:
extra_elements = (set(self.metadata.Sample.elements) -
elements)
if extra_elements:
new_lines = self._get_lines_from_elements(
extra_elements,
only_one=only_one,
only_lines=only_lines)
if new_lines:
self.add_lines(list(new_lines) + list(lines))
self.add_elements(elements)
if not hasattr(self.metadata, 'Sample'):
self.metadata.add_node('Sample')
if "Sample.xray_lines" in self.metadata:
xray_lines = xray_lines.union(
self.metadata.Sample.xray_lines)
self.metadata.Sample.xray_lines = sorted(list(xray_lines))
def _get_lines_from_elements(self,
elements,
only_one=False,
only_lines=("a",)):
"""Returns the X-ray lines of the given elements in spectral range
of the data.
Parameters
----------
elements : list of strings
A list containing the symbol of the chemical elements.
only_one : bool
If False, add all the lines of each element in the data spectral
range. If True only add the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, only the given lines will be returned.
Returns
-------
list of X-ray lines alphabetically sorted
"""
only_lines = utils_eds._parse_only_lines(only_lines)
try:
beam_energy = self._get_beam_energy()
except BaseException:
# Fall back to the high_value of the energy axis
beam_energy = self.axes_manager.signal_axes[0].high_value
lines = []
elements = [el if isinstance(el, str) else el.decode()
for el in elements]
for element in elements:
# Possible line (existing and excited by electron)
element_lines = []
for subshell in list(elements_db[element]['Atomic_properties'
]['Xray_lines'].keys()):
if only_lines and subshell not in only_lines:
continue
element_lines.append(element + "_" + subshell)
element_lines = self._get_xray_lines_in_spectral_range(
element_lines)[0]
if only_one and element_lines:
# Choose the best line
select_this = -1
element_lines.sort()
for i, line in enumerate(element_lines):
if (self._get_line_energy(line) < beam_energy / 2):
select_this = i
break
element_lines = [element_lines[select_this], ]
if not element_lines:
_logger.info(
("There is no X-ray line for element %s " % element) +
"in the data spectral range")
else:
lines.extend(element_lines)
lines.sort()
return lines
def _parse_xray_lines(self, xray_lines, only_one, only_lines):
only_lines = utils_eds._parse_only_lines(only_lines)
xray_lines = self._get_xray_lines(xray_lines, only_one=only_one,
only_lines=only_lines)
xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(
xray_lines)
for xray in xray_not_here:
            warnings.warn("%s is not in the data energy range. " % xray +
                          "You can remove it with " +
                          "s.metadata.Sample.xray_lines.remove('%s')"
                          % xray)
return xray_lines
def get_lines_intensity(self,
xray_lines=None,
integration_windows=2.,
background_windows=None,
plot_result=False,
only_one=True,
only_lines=("a",),
**kwargs):
"""Return the intensity map of selected Xray lines.
The intensities, the number of X-ray counts, are computed by
        summing the spectrum over the
different X-ray lines. The sum window width
is calculated from the energy resolution of the detector
as defined in 'energy_resolution_MnKa' of the metadata.
        The background averages in the provided windows can be subtracted from the
intensities.
Parameters
----------
xray_lines: {None, list of string}
If None,
if `metadata.Sample.elements.xray_lines` contains a
list of lines use those.
If `metadata.Sample.elements.xray_lines` is undefined
or empty but `metadata.Sample.elements` is defined,
use the same syntax as `add_line` to select a subset of lines
for the operation.
Alternatively, provide an iterable containing
a list of valid X-ray lines symbols.
integration_windows: Float or array
            If float, the width of the integration windows is
            `integration_windows` times the calculated FWHM of the line.
            Else provide an array for which each row corresponds to an X-ray
            line. Each row contains the left and right value of the window.
background_windows: None or 2D array of float
            If None, no background subtraction. Else, the background averages
            in the windows are subtracted from the returned intensities.
            'background_windows' provides the position of the windows in
            energy. Each line corresponds to an X-ray line. In a line, the
            first two values correspond to the limits of the left window and
            the last two values correspond to the limits of the right window.
plot_result : bool
If True, plot the calculated line intensities. If the current
object is a single spectrum it prints the result instead.
only_one : bool
If False, use all the lines of each element in the data spectral
range. If True use only the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, use only the given lines.
kwargs
The extra keyword arguments for plotting. See
`utils.plot.plot_signals`
Returns
-------
intensities : list
A list containing the intensities as BaseSignal subclasses.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.get_lines_intensity(['Mn_Ka'], plot_result=True)
Mn_La at 0.63316 keV : Intensity = 96700.00
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.plot(['Mn_Ka'], integration_windows=2.1)
>>> s.get_lines_intensity(['Mn_Ka'],
>>> integration_windows=2.1, plot_result=True)
Mn_Ka at 5.8987 keV : Intensity = 53597.00
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.set_elements(['Mn'])
>>> s.set_lines(['Mn_Ka'])
>>> bw = s.estimate_background_windows()
>>> s.plot(background_windows=bw)
>>> s.get_lines_intensity(background_windows=bw, plot_result=True)
Mn_Ka at 5.8987 keV : Intensity = 46716.00
See also
--------
set_elements, add_elements, estimate_background_windows,
plot
"""
xray_lines = self._parse_xray_lines(xray_lines, only_one, only_lines)
if hasattr(integration_windows, '__iter__') is False:
integration_windows = self.estimate_integration_windows(
windows_width=integration_windows, xray_lines=xray_lines)
intensities = []
ax = self.axes_manager.signal_axes[0]
# test Signal1D (0D problem)
# signal_to_index = self.axes_manager.navigation_dimension - 2
for i, (Xray_line, window) in enumerate(
zip(xray_lines, integration_windows)):
element, line = utils_eds._get_element_and_line(Xray_line)
line_energy = self._get_line_energy(Xray_line)
img = self.isig[window[0]:window[1]].integrate1D(-1)
if np.issubdtype(img.data.dtype, np.integer):
# The operations below require a float dtype with the default
# numpy casting rule ('same_kind')
img.change_dtype("float")
if background_windows is not None:
bw = background_windows[i]
                # TODO: test to prevent slicing bug. To be removed when fixed
indexes = [float(ax.value2index(de))
for de in list(bw) + window]
if indexes[0] == indexes[1]:
bck1 = self.isig[bw[0]]
else:
bck1 = self.isig[bw[0]:bw[1]].integrate1D(-1)
if indexes[2] == indexes[3]:
bck2 = self.isig[bw[2]]
else:
bck2 = self.isig[bw[2]:bw[3]].integrate1D(-1)
corr_factor = (indexes[5] - indexes[4]) / (
(indexes[1] - indexes[0]) + (indexes[3] - indexes[2]))
img = img - (bck1 + bck2) * corr_factor
img.metadata.General.title = (
'X-ray line intensity of %s: %s at %.2f %s' %
(self.metadata.General.title,
Xray_line,
line_energy,
self.axes_manager.signal_axes[0].units,
))
img.axes_manager.set_signal_dimension(0)
if plot_result and img.axes_manager.navigation_size == 1:
print("%s at %s %s : Intensity = %.2f"
% (Xray_line,
line_energy,
ax.units,
img.data))
img.metadata.set_item("Sample.elements", ([element]))
img.metadata.set_item("Sample.xray_lines", ([Xray_line]))
intensities.append(img)
if plot_result and img.axes_manager.navigation_size != 1:
utils.plot.plot_signals(intensities, **kwargs)
return intensities
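    # Worked example of the background correction above (illustrative numbers
    # only): ``corr_factor`` rescales the summed counts of the two background
    # windows to the width of the integration window, all measured in channels.
    # If the integration window spans 30 channels and each background window
    # spans 10 channels, corr_factor = 30 / (10 + 10) = 1.5 and
    # 1.5 * (bck1 + bck2) is subtracted from the integrated line intensity.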
def get_take_off_angle(self):
"""Calculate the take-off-angle (TOA).
TOA is the angle with which the X-rays leave the surface towards
the detector. Parameters are read in 'SEM.Stage.tilt_alpha',
'Acquisition_instrument.SEM.Detector.EDS.azimuth_angle' and
'SEM.Detector.EDS.elevation_angle' in 'metadata'.
Returns
-------
take_off_angle: float
in Degree
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.get_take_off_angle()
37.0
>>> s.set_microscope_parameters(tilt_stage=20.)
>>> s.get_take_off_angle()
57.0
See also
--------
hs.eds.take_off_angle
Notes
-----
Defined by M. Schaffer et al., Ultramicroscopy 107(8), pp 587-597
(2007)
"""
if self.metadata.Signal.signal_type == "EDS_SEM":
mp = self.metadata.Acquisition_instrument.SEM
elif self.metadata.Signal.signal_type == "EDS_TEM":
mp = self.metadata.Acquisition_instrument.TEM
tilt_stage = mp.Stage.tilt_alpha
azimuth_angle = mp.Detector.EDS.azimuth_angle
elevation_angle = mp.Detector.EDS.elevation_angle
TOA = utils.eds.take_off_angle(tilt_stage, azimuth_angle,
elevation_angle)
return TOA
def estimate_integration_windows(self,
windows_width=2.,
xray_lines=None):
"""
Estimate a window of integration for each X-ray line.
Parameters
----------
windows_width: float
The width of the integration windows is the 'windows_width' times
the calculated FWHM of the line.
xray_lines: None or list of string
If None, use 'metadata.Sample.elements.xray_lines'. Else,
provide an iterable containing a list of valid X-ray lines
symbols.
Return
------
integration_windows: 2D array of float
The positions of the windows in energy. Each row corresponds to a
X-ray line. Each row contains the left and right value of the
window.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
>>> s.add_lines()
>>> iw = s.estimate_integration_windows()
>>> s.plot(integration_windows=iw)
>>> s.get_lines_intensity(integration_windows=iw, plot_result=True)
Fe_Ka at 6.4039 keV : Intensity = 3710.00
Pt_La at 9.4421 keV : Intensity = 15872.00
See also
--------
plot, get_lines_intensity
"""
xray_lines = self._get_xray_lines(xray_lines)
integration_windows = []
for Xray_line in xray_lines:
line_energy, line_FWHM = self._get_line_energy(Xray_line,
FWHM_MnKa='auto')
element, line = utils_eds._get_element_and_line(Xray_line)
det = windows_width * line_FWHM / 2.
integration_windows.append([line_energy - det, line_energy + det])
return integration_windows
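    # Worked example (assumes ~130 eV resolution at Mn Ka; numbers are
    # illustrative only): each window is line_energy +/- windows_width*FWHM/2,
    # so Mn_Ka at ~5.899 keV with windows_width=2 and FWHM ~0.13 keV gives an
    # integration window of roughly [5.77, 6.03] keV.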
def estimate_background_windows(self,
line_width=[2, 2],
windows_width=1,
xray_lines=None):
"""
Estimate two windows around each X-ray line containing only the
background.
Parameters
----------
line_width: list of two floats
The position of the two windows around the X-ray line is given by
the `line_width` (left and right) times the calculated FWHM of the
line.
windows_width: float
            The width of the windows is the `windows_width` times the
calculated FWHM of the line.
xray_lines: None or list of string
If None, use `metadata.Sample.elements.xray_lines`. Else,
provide an iterable containing a list of valid X-ray lines
symbols.
Return
------
windows_position: 2D array of float
            The position of the windows in energy. Each line corresponds to an
            X-ray line. In a line, the first two values correspond to the
            limits of the left window and the last two values correspond to
            the limits of the right window.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
>>> s.add_lines()
>>> bw = s.estimate_background_windows(line_width=[5.0, 2.0])
>>> s.plot(background_windows=bw)
>>> s.get_lines_intensity(background_windows=bw, plot_result=True)
Fe_Ka at 6.4039 keV : Intensity = 2754.00
Pt_La at 9.4421 keV : Intensity = 15090.00
See also
--------
plot, get_lines_intensity
"""
xray_lines = self._get_xray_lines(xray_lines)
windows_position = []
for xray_line in xray_lines:
line_energy, line_FWHM = self._get_line_energy(xray_line,
FWHM_MnKa='auto')
tmp = [
line_energy - line_FWHM * line_width[0] -
line_FWHM * windows_width,
line_energy - line_FWHM * line_width[0],
line_energy + line_FWHM * line_width[1],
line_energy + line_FWHM * line_width[1] +
line_FWHM * windows_width
]
windows_position.append(tmp)
windows_position = np.array(windows_position)
        # merge overlapping windows
index = windows_position.argsort(axis=0)[:, 0]
for i in range(len(index) - 1):
ia, ib = index[i], index[i + 1]
if windows_position[ia, 2] > windows_position[ib, 0]:
interv = np.append(windows_position[ia, :2],
windows_position[ib, 2:])
windows_position[ia] = interv
windows_position[ib] = interv
return windows_position
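    # Worked example (assumes ~130 eV resolution at Mn Ka; numbers are
    # illustrative only): with the defaults line_width=[2, 2] and
    # windows_width=1 the left window spans [E - 3*FWHM, E - 2*FWHM] and the
    # right window [E + 2*FWHM, E + 3*FWHM]. For Mn_Ka at ~5.90 keV this is
    # roughly [5.51, 5.64] keV and [6.16, 6.29] keV; overlapping windows of
    # neighbouring lines are merged by the loop above.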
def plot(self,
xray_lines=False,
only_lines=("a", "b"),
only_one=False,
background_windows=None,
integration_windows=None,
**kwargs):
"""Plot the EDS spectrum. The following markers can be added
- The position of the X-ray lines and their names.
        - The background windows associated with each X-ray line. A black line
links the left and right window with the average value in each window.
Parameters
----------
xray_lines: {False, True, 'from_elements', list of string}
If not False, indicate the position and the name of the X-ray
lines.
If True, if `metadata.Sample.elements.xray_lines` contains a
list of lines use those. If `metadata.Sample.elements.xray_lines`
is undefined or empty or if xray_lines equals 'from_elements' and
`metadata.Sample.elements` is defined, use the same syntax as
`add_line` to select a subset of lines for the operation.
Alternatively, provide an iterable containing a list of valid X-ray
lines symbols.
only_lines : None or list of strings
            If not None, use only the given lines (e.g. ('a','Kb')).
If None, use all lines.
only_one : bool
If False, use all the lines of each element in the data spectral
range. If True use only the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
background_windows: None or 2D array of float
If not None, add markers at the position of the windows in energy.
            Each line corresponds to an X-ray line. In a line, the first two
            values correspond to the limits of the left window and the last
            two values correspond to the limits of the right window.
integration_windows: None or 'auto' or float or 2D array of float
If not None, add markers at the position of the integration
windows.
If 'auto' (or float), the width of the integration windows is 2.0
(or float) times the calculated FWHM of the line. see
'estimate_integration_windows'.
            Else provide an array for which each row corresponds to an X-ray
line. Each row contains the left and right value of the window.
%s
%s
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.plot()
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.plot(True)
>>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
>>> s.add_lines()
>>> bw = s.estimate_background_windows()
>>> s.plot(background_windows=bw)
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.plot(['Mn_Ka'], integration_windows='auto')
>>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
>>> s.add_lines()
>>> bw = s.estimate_background_windows()
>>> s.plot(background_windows=bw, integration_windows=2.1)
See also
--------
set_elements, add_elements, estimate_integration_windows,
get_lines_intensity, estimate_background_windows
"""
super().plot(**kwargs)
self._plot_xray_lines(xray_lines, only_lines, only_one,
background_windows, integration_windows)
plot.__doc__ %= (BASE_PLOT_DOCSTRING_PARAMETERS,
PLOT1D_DOCSTRING)
def _plot_xray_lines(self, xray_lines=False, only_lines=("a", "b"),
only_one=False, background_windows=None,
integration_windows=None):
if xray_lines is not False or\
background_windows is not None or\
integration_windows is not None:
if xray_lines is False:
xray_lines = True
only_lines = utils_eds._parse_only_lines(only_lines)
if xray_lines is True or xray_lines == 'from_elements':
if 'Sample.xray_lines' in self.metadata \
and xray_lines != 'from_elements':
xray_lines = self.metadata.Sample.xray_lines
elif 'Sample.elements' in self.metadata:
xray_lines = self._get_lines_from_elements(
self.metadata.Sample.elements,
only_one=only_one,
only_lines=only_lines)
else:
_logger.warning(
"No elements defined, set them with `add_elements`")
# No X-rays lines, nothing to do then
return
xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(
xray_lines)
for xray in xray_not_here:
_logger.warning("%s is not in the data energy range." % xray)
xray_lines = np.unique(xray_lines)
self.add_xray_lines_markers(xray_lines)
if background_windows is not None:
self._add_background_windows_markers(background_windows)
if integration_windows is not None:
if integration_windows == 'auto':
integration_windows = 2.0
if hasattr(integration_windows, '__iter__') is False:
integration_windows = self.estimate_integration_windows(
windows_width=integration_windows,
xray_lines=xray_lines)
self._add_vertical_lines_groups(integration_windows,
linestyle='--')
def _add_vertical_lines_groups(self, position, **kwargs):
"""
Add vertical markers for each group that shares the color.
Parameters
----------
position: 2D array of float
The position on the signal axis. Each row corresponds to a
group.
kwargs
keywords argument for markers.vertical_line
"""
per_xray = len(position[0])
if LooseVersion(matplotlib.__version__) >= "1.5.3":
colors = itertools.cycle(np.sort(
plt.rcParams['axes.prop_cycle'].by_key()["color"] * per_xray))
else:
colors = itertools.cycle(np.sort(
plt.rcParams['axes.color_cycle'] * per_xray))
for x, color in zip(np.ravel(position), colors):
line = markers.vertical_line(x=x, color=color, **kwargs)
self.add_marker(line, render_figure=False)
self._render_figure(plot=['signal_plot'])
def add_xray_lines_markers(self, xray_lines):
"""
        Add markers on a spec.plot() with the name of the selected X-ray
lines
Parameters
----------
xray_lines: list of string
A valid list of X-ray lines
"""
line_energy = []
intensity = []
for xray_line in xray_lines:
element, line = utils_eds._get_element_and_line(xray_line)
line_energy.append(self._get_line_energy(xray_line))
relative_factor = elements_db[element][
'Atomic_properties']['Xray_lines'][line]['weight']
a_eng = self._get_line_energy(element + '_' + line[0] + 'a')
intensity.append(self.isig[a_eng].data * relative_factor)
for i in range(len(line_energy)):
line = markers.vertical_line_segment(
x=line_energy[i], y1=None, y2=intensity[i] * 0.8)
self.add_marker(line, render_figure=False)
string = (r'$\mathrm{%s}_{\mathrm{%s}}$' %
utils_eds._get_element_and_line(xray_lines[i]))
text = markers.text(
x=line_energy[i], y=intensity[i] * 1.1, text=string,
rotation=90)
self.add_marker(text, render_figure=False)
self._xray_markers[xray_lines[i]] = [line, text]
line.events.closed.connect(self._xray_marker_closed)
text.events.closed.connect(self._xray_marker_closed)
self._render_figure(plot=['signal_plot'])
def _xray_marker_closed(self, obj):
marker = obj
for xray_line, line_markers in reversed(list(
self._xray_markers.items())):
if marker in line_markers:
line_markers.remove(marker)
if not line_markers:
self._xray_markers.pop(xray_line)
def remove_xray_lines_markers(self, xray_lines):
"""
        Remove markers previously added on a spec.plot() with the name of the
selected X-ray lines
Parameters
----------
xray_lines: list of string
A valid list of X-ray lines to remove
"""
for xray_line in xray_lines:
if xray_line in self._xray_markers:
line_markers = self._xray_markers[xray_line]
while line_markers:
m = line_markers.pop()
m.close(render_figure=False)
self._render_figure(plot=['signal_plot'])
def _add_background_windows_markers(self,
windows_position):
"""
        Plot the background windows associated with each X-ray line.
For X-ray lines, a black line links the left and right window with the
average value in each window.
Parameters
----------
windows_position: 2D array of float
            The position of the windows in energy. Each line corresponds to an
            X-ray line. In a line, the first two values correspond to the
            limits of the left window and the last two values correspond to
            the limits of the right window.
See also
--------
estimate_background_windows, get_lines_intensity
"""
self._add_vertical_lines_groups(windows_position)
ax = self.axes_manager.signal_axes[0]
for bw in windows_position:
# TODO: test to prevent slicing bug. To be reomved when fixed
if ax.value2index(bw[0]) == ax.value2index(bw[1]):
y1 = self.isig[bw[0]].data
else:
y1 = self.isig[bw[0]:bw[1]].mean(-1).data
if ax.value2index(bw[2]) == ax.value2index(bw[3]):
y2 = self.isig[bw[2]].data
else:
y2 = self.isig[bw[2]:bw[3]].mean(-1).data
line = markers.line_segment(
x1=(bw[0] + bw[1]) / 2., x2=(bw[2] + bw[3]) / 2.,
y1=y1, y2=y2, color='black')
self.add_marker(line, render_figure=False)
self._render_figure(plot=['signal_plot'])
class EDSSpectrum(EDS_mixin, Signal1D):
pass
class LazyEDSSpectrum(EDSSpectrum, LazySignal1D):
pass
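# A typical end-to-end workflow combining the methods above, mirroring the
# docstring examples (assumes HyperSpy's bundled example EDS dataset):
#
# >>> import hyperspy.api as hs
# >>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
# >>> s.add_lines()
# >>> bw = s.estimate_background_windows(line_width=[5.0, 2.0])
# >>> s.plot(background_windows=bw)
# >>> s.get_lines_intensity(background_windows=bw, plot_result=True)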
| gpl-3.0 |
Titan-C/scikit-learn | sklearn/metrics/scorer.py | 33 | 17925 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.model_selection.GridSearchCV` or
:func:`sklearn.model_selection.cross_val_score` as the ``scoring``
parameter, to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, mean_squared_log_error, accuracy_score,
f1_score, roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from .cluster import homogeneity_score
from .cluster import completeness_score
from .cluster import v_measure_score
from .cluster import mutual_info_score
from .cluster import adjusted_mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
# XXX After removing the deprecated scorers (v0.20) remove the
# XXX deprecation_msg property again and remove __call__'s body again
self._deprecation_msg = None
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
if self._deprecation_msg is not None:
warnings.warn(self._deprecation_msg,
category=DeprecationWarning,
stacklevel=2)
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
super(_PredictScorer, self).__call__(estimator, X, y_true,
sample_weight=sample_weight)
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
super(_ProbaScorer, self).__call__(clf, X, y,
sample_weight=sample_weight)
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
super(_ThresholdScorer, self).__call__(clf, X, y,
sample_weight=sample_weight)
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
                    y_pred = np.vstack([p for p in y_pred]).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
scorers = [scorer for scorer in SCORERS
if SCORERS[scorer]._deprecation_msg is None]
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(scorers)))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should be an estimator implementing "
"'fit' method, %r was passed" % estimator)
if isinstance(scoring, six.string_types):
return get_scorer(scoring)
elif has_scoring:
# Heuristic to ensure user has not passed a metric
module = getattr(scoring, '__module__', None)
if hasattr(module, 'startswith') and \
module.startswith('sklearn.metrics.') and \
not module.startswith('sklearn.metrics.scorer') and \
not module.startswith('sklearn.metrics.tests.'):
raise ValueError('scoring value %r looks like it is a metric '
'function rather than a scorer. A scorer should '
'require an estimator as its first parameter. '
'Please use `make_scorer` to convert a metric '
'to a scorer.' % scoring)
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
    ``mean_squared_error``, ``adjusted_rand_score`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
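# A minimal sketch of wrapping a loss: with ``greater_is_better=False`` the
# returned scorer sign-flips the metric, so larger (less negative) scorer
# values still mean a better model.
#
# >>> neg_mse = make_scorer(mean_squared_error, greater_is_better=False)
# >>> neg_mse
# make_scorer(mean_squared_error, greater_is_better=False)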
# Standard regression scores
r2_scorer = make_scorer(r2_score)
neg_mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
deprecation_msg = ('Scoring method mean_squared_error was renamed to '
'neg_mean_squared_error in version 0.18 and will '
'be removed in 0.20.')
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_squared_error_scorer._deprecation_msg = deprecation_msg
neg_mean_squared_log_error_scorer = make_scorer(mean_squared_log_error,
greater_is_better=False)
neg_mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
deprecation_msg = ('Scoring method mean_absolute_error was renamed to '
'neg_mean_absolute_error in version 0.18 and will '
'be removed in 0.20.')
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
mean_absolute_error_scorer._deprecation_msg = deprecation_msg
neg_median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
deprecation_msg = ('Scoring method median_absolute_error was renamed to '
'neg_median_absolute_error in version 0.18 and will '
'be removed in 0.20.')
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
median_absolute_error_scorer._deprecation_msg = deprecation_msg
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
neg_log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
deprecation_msg = ('Scoring method log_loss was renamed to '
'neg_log_loss in version 0.18 and will be removed in 0.20.')
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
log_loss_scorer._deprecation_msg = deprecation_msg
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
homogeneity_scorer = make_scorer(homogeneity_score)
completeness_scorer = make_scorer(completeness_score)
v_measure_scorer = make_scorer(v_measure_score)
mutual_info_scorer = make_scorer(mutual_info_score)
adjusted_mutual_info_scorer = make_scorer(adjusted_mutual_info_score)
normalized_mutual_info_scorer = make_scorer(normalized_mutual_info_score)
fowlkes_mallows_scorer = make_scorer(fowlkes_mallows_score)
SCORERS = dict(r2=r2_scorer,
neg_median_absolute_error=neg_median_absolute_error_scorer,
neg_mean_absolute_error=neg_mean_absolute_error_scorer,
neg_mean_squared_error=neg_mean_squared_error_scorer,
neg_mean_squared_log_error=neg_mean_squared_log_error_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
neg_log_loss=neg_log_loss_scorer,
# Cluster metrics that use supervised evaluation
adjusted_rand_score=adjusted_rand_scorer,
homogeneity_score=homogeneity_scorer,
completeness_score=completeness_scorer,
v_measure_score=v_measure_scorer,
mutual_info_score=mutual_info_scorer,
adjusted_mutual_info_score=adjusted_mutual_info_scorer,
normalized_mutual_info_score=normalized_mutual_info_scorer,
fowlkes_mallows_score=fowlkes_mallows_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(metric, pos_label=None,
average=average)
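if __name__ == "__main__":  # pragma: no cover
    # Minimal sketch: list the registered scorer names and resolve one by name.
    print(sorted(SCORERS.keys()))
    print(get_scorer("neg_mean_squared_error"))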
| bsd-3-clause |
dannyjacobs/PRISim | main/fhd_delay_spectrum_data_visualization.py | 1 | 28084 | import numpy as NP
from astropy.io import fits
from astropy.io import ascii
import scipy.constants as FCNST
from scipy import interpolate
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import matplotlib.cm as CMAP
import matplotlib.animation as MOV
from matplotlib import ticker
from scipy.interpolate import griddata
import datetime as DT
import time
import progressbar as PGB
import healpy as HP
import geometry as GEOM
import interferometry as RI
import catalog as CTLG
import constants as CNST
import my_DSP_modules as DSP
import my_operations as OPS
import primary_beams as PB
import baseline_delay_horizon as DLY
import lookup_operations as LKP
import ipdb as PDB
antenna_file = '/data3/t_nithyanandan/project_MWA/MWA_128T_antenna_locations_MNRAS_2012_Beardsley_et_al.txt'
telescope = 'mwa'
telescope_str = telescope + '_'
if telescope == 'mwa':
telescope_str = ''
ant_locs = NP.loadtxt(antenna_file, skiprows=6, comments='#', usecols=(0,1,2,3))
ref_bl, ref_bl_id = RI.baseline_generator(ant_locs[:,1:], ant_id=ant_locs[:,0].astype(int).astype(str), auto=False, conjugate=False)
ref_bl_length = NP.sqrt(NP.sum(ref_bl**2, axis=1))
ref_bl_orientation = NP.angle(ref_bl[:,0] + 1j * ref_bl[:,1], deg=True)
neg_bl_orientation_ind = ref_bl_orientation < 0.0
ref_bl[neg_bl_orientation_ind,:] = -1.0 * ref_bl[neg_bl_orientation_ind,:]
ref_bl_orientation = NP.angle(ref_bl[:,0] + 1j * ref_bl[:,1], deg=True)
sortind = NP.argsort(ref_bl_length, kind='mergesort')
ref_bl = ref_bl[sortind,:]
ref_bl_length = ref_bl_length[sortind]
ref_bl_orientation = ref_bl_orientation[sortind]
ref_bl_id = ref_bl_id[sortind]
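# The folding above maps every baseline orientation into [0, 180) degrees by
# negating baselines with negative position angles (equivalent to using the
# conjugate baseline). For example, (dx, dy) = (10, -10) m has orientation
# -45 deg; after negation to (-10, 10) m the orientation becomes +135 deg.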
n_bins_baseline_orientation = 4
latitude = -26.701
fhd_obsid = [1061309344, 1061316544]
freq = 185.0 * 1e6 # foreground center frequency in Hz
freq_resolution = 80e3 # in Hz
bpass_shape = 'bhw'
n_channels = 384
nchan = n_channels
bw = nchan * freq_resolution
n_sky_sectors = 1
sky_sector = None # if None, use all sky sector. Accepted values are None, 0, 1, 2, or 3
if sky_sector is None:
sky_sector_str = '_all_sky_'
n_sky_sectors = 1
sky_sector = 0
else:
sky_sector_str = '_sky_sector_{0:0d}_'.format(sky_sector)
pointing_file = '/data3/t_nithyanandan/project_MWA/Aug23_obsinfo.txt'
pointing_info_from_file = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(1,2,3), delimiter=',')
obs_id = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(0,), delimiter=',', dtype=str)
lst = 15.0 * pointing_info_from_file[:,2]
pointings_altaz = OPS.reverse(pointing_info_from_file[:,:2].reshape(-1,2), axis=1)
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
max_abs_delay = 2.5 # in micro seconds
nside = 128
use_GSM = True
use_DSM = False
use_CSM = False
use_NVSS = False
use_SUMSS = False
use_MSS = False
use_GLEAM = False
use_PS = False
if use_GSM:
fg_str = 'asm'
elif use_DSM:
fg_str = 'dsm'
elif use_CSM:
fg_str = 'csm'
elif use_SUMSS:
fg_str = 'sumss'
elif use_GLEAM:
fg_str = 'gleam'
elif use_PS:
fg_str = 'point'
elif use_NVSS:
fg_str = 'nvss'
else:
fg_str = 'other'
backdrop_xsize = 100
backdrop_coords = 'radec'
if use_DSM or use_GSM:
backdrop_coords = 'radec'
if backdrop_coords == 'radec':
xmin = -180.0
xmax = 180.0
ymin = -90.0
ymax = 90.0
xgrid, ygrid = NP.meshgrid(NP.linspace(xmin, xmax, backdrop_xsize), NP.linspace(ymin, ymax, backdrop_xsize/2))
xvect = xgrid.ravel()
yvect = ygrid.ravel()
elif backdrop_coords == 'dircos':
xmin = -1.0
xmax = 1.0
ymin = -1.0
ymax = 1.0
xgrid, ygrid = NP.meshgrid(NP.linspace(xmin, xmax, backdrop_xsize), NP.linspace(ymin, ymax, backdrop_xsize))
nanind = (xgrid**2 + ygrid**2) > 1.0
goodind = (xgrid**2 + ygrid**2) <= 1.0
zgrid = NP.empty_like(xgrid)
zgrid[nanind] = NP.nan
zgrid[goodind] = NP.sqrt(1.0 - (xgrid[goodind]**2 + ygrid[goodind]**2))
xvect = xgrid.ravel()
yvect = ygrid.ravel()
zvect = zgrid.ravel()
xyzvect = NP.hstack((xvect.reshape(-1,1), yvect.reshape(-1,1), zvect.reshape(-1,1)))
if use_DSM or use_GSM:
dsm_file = '/data3/t_nithyanandan/project_MWA/foregrounds/gsmdata_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq/1e6,nside)
hdulist = fits.open(dsm_file)
dsm_table = hdulist[1].data
ra_deg = dsm_table['RA']
dec_deg = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
fluxes = temperatures
backdrop = HP.cartview(temperatures.ravel(), coord=['G','E'], rot=[0,0,0], xsize=backdrop_xsize, return_projected_map=True)
elif use_GLEAM or use_SUMSS or use_NVSS or use_CSM:
if use_GLEAM:
catalog_file = '/data3/t_nithyanandan/project_MWA/foregrounds/mwacs_b1_131016.csv' # GLEAM catalog
catdata = ascii.read(catalog_file, data_start=1, delimiter=',')
dec_deg = catdata['DEJ2000']
ra_deg = catdata['RAJ2000']
fpeak = catdata['S150_fit']
ferr = catdata['e_S150_fit']
freq_catalog = 1.4 # GHz
spindex = -0.83 + NP.zeros(fpeak.size)
fluxes = fpeak * (freq_catalog * 1e9 / freq)**spindex
elif use_SUMSS:
SUMSS_file = '/data3/t_nithyanandan/project_MWA/foregrounds/sumsscat.Mar-11-2008.txt'
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg = ra_deg[PS_ind]
dec_deg = dec_deg[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 1.0
ra_deg = ra_deg[bright_source_ind]
dec_deg = dec_deg[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg = ra_deg[valid_ind]
dec_deg = dec_deg[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
freq_catalog = 0.843 # in GHz
spindex = -0.83 + NP.zeros(fint.size)
fluxes = fint * (freq_catalog*1e9/freq)**spindex
elif use_NVSS:
pass
else:
freq_SUMSS = 0.843 # in GHz
SUMSS_file = '/data3/t_nithyanandan/project_MWA/foregrounds/sumsscat.Mar-11-2008.txt'
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
spindex_SUMSS = -0.83 + NP.zeros(fint.size)
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS
ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]
dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
spindex_SUMSS = spindex_SUMSS[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
spindex_SUMSS = spindex_SUMSS[valid_ind]
freq_catalog = freq_SUMSS*1e9 + NP.zeros(fint.size)
catlabel = NP.repeat('SUMSS', fint.size)
ra_deg = ra_deg_SUMSS + 0.0
dec_deg = dec_deg_SUMSS
spindex = spindex_SUMSS
majax = fmajax/3.6e3
minax = fminax/3.6e3
fluxes = fint + 0.0
nvss_file = '/data3/t_nithyanandan/project_MWA/foregrounds/NVSS_catalog.fits'
freq_NVSS = 1.4 # in GHz
hdulist = fits.open(nvss_file)
ra_deg_NVSS = hdulist[1].data['RA(2000)']
dec_deg_NVSS = hdulist[1].data['DEC(2000)']
nvss_fpeak = hdulist[1].data['PEAK INT']
nvss_majax = hdulist[1].data['MAJOR AX']
nvss_minax = hdulist[1].data['MINOR AX']
hdulist.close()
spindex_NVSS = -0.83 + NP.zeros(nvss_fpeak.size)
not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind))
nvss_fpeak = nvss_fpeak[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]
freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))
catlabel = NP.concatenate((catlabel, NP.repeat('NVSS',count_valid)))
ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
spindex = NP.concatenate((spindex, spindex_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
majax = NP.concatenate((majax, nvss_majax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
minax = NP.concatenate((minax, nvss_minax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
fluxes = NP.concatenate((fluxes, nvss_fpeak))
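    # Bundle the assembled catalog into a single Catalog object and grid the
    # fluxes onto the backdrop for plotting.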
ctlgobj = CTLG.Catalog(catlabel, freq_catalog, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fluxes, spectral_index=spindex, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
if backdrop_coords == 'radec':
backdrop = griddata(NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fluxes, NP.hstack((xvect.reshape(-1,1), yvect.reshape(-1,1))), method='cubic')
backdrop = backdrop.reshape(backdrop_xsize/2, backdrop_xsize)
elif backdrop_coords == 'dircos':
if (telescope == 'mwa_dipole') or (obs_mode == 'drift'):
backdrop = PB.primary_beam_generator(xyzvect, freq, telescope=telescope, freq_scale='Hz', skyunits='dircos', pointing_center=[0.0,0.0,1.0])
backdrop = backdrop.reshape(backdrop_xsize, backdrop_xsize)
else:
if use_PS:
catalog_file = '/data3/t_nithyanandan/project_MWA/foregrounds/PS_catalog.txt'
catdata = ascii.read(catalog_file, comment='#', header_start=0, data_start=1)
ra_deg = catdata['RA'].data
dec_deg = catdata['DEC'].data
fluxes = catdata['F_INT'].data
if backdrop_coords == 'radec':
ra_deg_wrapped = ra_deg.ravel() + 0.0
ra_deg_wrapped[ra_deg > 180.0] -= 360.0
dxvect = xgrid[0,1]-xgrid[0,0]
dyvect = ygrid[1,0]-ygrid[0,0]
ibind, nnval, distNN = LKP.lookup(ra_deg_wrapped.ravel(), dec_deg.ravel(), fluxes.ravel(), xvect, yvect, distance_ULIM=NP.sqrt(dxvect**2 + dyvect**2), remove_oob=False)
backdrop = nnval.reshape(backdrop_xsize/2, backdrop_xsize)
# backdrop = griddata(NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fluxes, NP.hstack((xvect.reshape(-1,1), yvect.reshape(-1,1))), method='nearest')
# backdrop = backdrop.reshape(backdrop_xsize/2, backdrop_xsize)
elif backdrop_coords == 'dircos':
if (telescope == 'mwa_dipole') or (obs_mode == 'drift'):
backdrop = PB.primary_beam_generator(xyzvect, freq, telescope=telescope, freq_scale='Hz', skyunits='dircos', pointing_center=[0.0,0.0,1.0])
backdrop = backdrop.reshape(backdrop_xsize, backdrop_xsize)
cardinal_blo = 180.0 / n_bins_baseline_orientation * (NP.arange(n_bins_baseline_orientation)-1).reshape(-1,1)
cardinal_bll = 100.0
cardinal_bl = cardinal_bll * NP.hstack((NP.cos(NP.radians(cardinal_blo)), NP.sin(NP.radians(cardinal_blo)), NP.zeros_like(cardinal_blo)))
pc = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)
pc_coords = 'dircos'
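# Loop over each FHD obsid: load the CLEANed delay-space visibilities, keep
# baselines with non-zero data, rescale them, and build the delay-spectrum /
# foreground figure for that snapshot.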
for j in range(len(fhd_obsid)):
fhd_infile = '/data3/t_nithyanandan/project_MWA/fhd_delay_spectrum_{0:0d}_reformatted.npz'.format(fhd_obsid[j])
fhd_data = NP.load(fhd_infile)
fhd_bl_id = fhd_data['fhd_bl_id']
fhd_bl_ind = NP.squeeze(NP.where(NP.in1d(ref_bl_id, fhd_bl_id)))
bl_id = ref_bl_id[fhd_bl_ind]
bl = ref_bl[fhd_bl_ind, :]
bl_length = ref_bl_length[fhd_bl_ind]
bl_orientation = ref_bl_orientation[fhd_bl_ind]
fhd_vis_lag_noisy = fhd_data['fhd_vis_lag_noisy']
fhd_delays = fhd_data['fhd_delays']
fhd_C = fhd_data['fhd_C']
valid_ind = NP.logical_and(NP.abs(NP.sum(fhd_vis_lag_noisy[:,:,0],axis=1))!=0.0, NP.abs(NP.sum(fhd_C[:,:,0],axis=1))!=0.0)
fhd_C = fhd_C[valid_ind,:,:]
fhd_vis_lag_noisy = fhd_vis_lag_noisy[valid_ind,:,:]
bl_id = bl_id[valid_ind]
bl = bl[valid_ind,:]
bl_length = bl_length[valid_ind]
bl_orientation = bl_orientation[valid_ind]
neg_bl_orientation_ind = bl_orientation > 90.0 + 0.5*180.0/n_bins_baseline_orientation
bl_orientation[neg_bl_orientation_ind] -= 180.0
bl[neg_bl_orientation_ind,:] = -bl[neg_bl_orientation_ind,:]
fhd_vis_lag_noisy *= 2.78*nchan*freq_resolution/fhd_C
fhd_obsid_pointing_dircos = pointings_dircos[obs_id==str(fhd_obsid[j]),:].reshape(1,-1)
fhd_obsid_pointing_altaz = pointings_altaz[obs_id==str(fhd_obsid[j]),:].reshape(1,-1)
fhd_obsid_pointing_hadec = pointings_hadec[obs_id==str(fhd_obsid[j]),:].reshape(1,-1)
fhd_lst = NP.asscalar(lst[obs_id==str(fhd_obsid[j])])
fhd_obsid_pointing_radec = NP.copy(fhd_obsid_pointing_hadec)
fhd_obsid_pointing_radec[0,0] = fhd_lst - fhd_obsid_pointing_hadec[0,0]
delay_matrix = DLY.delay_envelope(bl, fhd_obsid_pointing_dircos, units='mks')
delaymat = DLY.delay_envelope(bl, pc, units='mks')
min_delay = -delaymat[0,:,1]-delaymat[0,:,0]
max_delay = delaymat[0,:,0]-delaymat[0,:,1]
min_delay = min_delay.reshape(-1,1)
max_delay = max_delay.reshape(-1,1)
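    # Delay windows: |delay| beyond max_abs_delay is treated as the
    # thermal-noise window, delays outside the horizon envelope (padded by the
    # inverse bandwidth) as the EoR window, and delays inside the envelope as
    # the foreground wedge.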
thermal_noise_window = NP.abs(fhd_delays) >= max_abs_delay*1e-6
thermal_noise_window = thermal_noise_window.reshape(1,-1)
thermal_noise_window = NP.repeat(thermal_noise_window, bl.shape[0], axis=0)
EoR_window = NP.logical_or(fhd_delays > max_delay+1/bw, fhd_delays < min_delay-1/bw)
wedge_window = NP.logical_and(fhd_delays <= max_delay, fhd_delays >= min_delay)
fhd_vis_rms_lag = OPS.rms(fhd_vis_lag_noisy[:,:,0], mask=NP.logical_not(thermal_noise_window), axis=1)
fhd_vis_rms_freq = NP.abs(fhd_vis_rms_lag) / NP.sqrt(nchan) / freq_resolution
PDB.set_trace()
print fhd_vis_rms_freq
if max_abs_delay is not None:
small_delays_ind = NP.abs(fhd_delays) <= max_abs_delay * 1e-6
fhd_delays = fhd_delays[small_delays_ind]
fhd_vis_lag_noisy = fhd_vis_lag_noisy[:,small_delays_ind,:]
# fig = PLT.figure(figsize=(6,8))
# ax = fig.add_subplot(111)
# ax.set_xlabel('Baseline Index', fontsize=18)
# ax.set_ylabel(r'lag [$\mu$s]', fontsize=18)
# # dspec = ax.imshow(NP.abs(fhd_vis_lag_noisy[:,:,0].T), origin='lower', extent=(0, fhd_vis_lag_noisy.shape[0]-1, NP.amin(fhd_delays*1e6), NP.amax(fhd_delays*1e6)), interpolation=None)
# dspec = ax.imshow(NP.abs(fhd_vis_lag_noisy[:,:,0].T), origin='lower', extent=(0, fhd_vis_lag_noisy.shape[0]-1, NP.amin(fhd_delays*1e6), NP.amax(fhd_delays*1e6)), norm=PLTC.LogNorm(1.0e7, vmax=1.0e10), interpolation=None)
# ax.set_aspect('auto')
# cbax = fig.add_axes([0.88, 0.08, 0.03, 0.9])
# cb = fig.colorbar(dspec, cax=cbax, orientation='vertical')
# cbax.set_ylabel('Jy Hz', labelpad=-60, fontsize=18)
# PLT.tight_layout()
# fig.subplots_adjust(right=0.8)
# fig.subplots_adjust(left=0.1)
# PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/fhd_multi_baseline_CLEAN_visibilities_{0:0d}.eps'.format(fhd_obsid[j]), bbox_inches=0)
# PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/fhd_multi_baseline_CLEAN_visibilities_{0:0d}.png'.format(fhd_obsid[j]), bbox_inches=0)
blo = NP.copy(bl_orientation)
bloh, bloe, blon, blori = OPS.binned_statistic(blo, statistic='count', bins=n_bins_baseline_orientation, range=[(-90.0+0.5*180.0/n_bins_baseline_orientation, 90.0+0.5*180.0/n_bins_baseline_orientation)])
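    # Bin baselines by orientation; bloe holds the bin edges and blori is used
    # below to pull out the members of each orientation bin.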
if n_bins_baseline_orientation == 4:
blo_ax_mapping = [7,4,1,2,3,6,9,8]
overlay = {}
if backdrop_coords == 'radec':
havect = fhd_lst - xvect
altaz = GEOM.hadec2altaz(NP.hstack((havect.reshape(-1,1),yvect.reshape(-1,1))), latitude, units='degrees')
dircos = GEOM.altaz2dircos(altaz, units='degrees')
roi_altaz = NP.asarray(NP.where(altaz[:,0] >= 0.0)).ravel()
az = altaz[:,1] + 0.0
az[az > 360.0 - 0.5*180.0/n_sky_sectors] -= 360.0
roi_sector_altaz = NP.asarray(NP.where(NP.logical_or(NP.logical_and(az[roi_altaz] >= -0.5*180.0/n_sky_sectors + sky_sector*180.0/n_sky_sectors, az[roi_altaz] < -0.5*180.0/n_sky_sectors + (sky_sector+1)*180.0/n_sky_sectors), NP.logical_and(az[roi_altaz] >= 180.0 - 0.5*180.0/n_sky_sectors + sky_sector*180.0/n_sky_sectors, az[roi_altaz] < 180.0 - 0.5*180.0/n_sky_sectors + (sky_sector+1)*180.0/n_sky_sectors)))).ravel()
pb = NP.empty(xvect.size)
pb.fill(NP.nan)
bd = NP.empty(xvect.size)
bd.fill(NP.nan)
pb[roi_altaz] = PB.primary_beam_generator(altaz[roi_altaz,:], freq, telescope=telescope, skyunits='altaz', freq_scale='Hz', pointing_center=fhd_obsid_pointing_altaz)
# bd[roi_altaz] = backdrop.ravel()[roi_altaz]
# pb[roi_altaz[roi_sector_altaz]] = PB.primary_beam_generator(altaz[roi_altaz[roi_sector_altaz],:], freq, telescope=telescope, skyunits='altaz', freq_scale='Hz', phase_center=fhd_obsid_pointing_altaz)
bd[roi_altaz[roi_sector_altaz]] = backdrop.ravel()[roi_altaz[roi_sector_altaz]]
overlay['pbeam'] = pb
overlay['backdrop'] = bd
overlay['roi_obj_inds'] = roi_altaz
overlay['roi_sector_inds'] = roi_altaz[roi_sector_altaz]
overlay['delay_map'] = NP.empty((n_bins_baseline_orientation, xvect.size))
overlay['delay_map'].fill(NP.nan)
overlay['delay_map'][:,roi_altaz] = (DLY.geometric_delay(cardinal_bl, altaz[roi_altaz,:], altaz=True, dircos=False, hadec=False, latitude=latitude)-DLY.geometric_delay(cardinal_bl, pc, altaz=False, dircos=True, hadec=False, latitude=latitude)).T
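        # For point-source models, also record which catalog sources are above
        # the horizon and the primary-beam weight at each of them, for the
        # scatter overlay in the central panel.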
if use_CSM or use_SUMSS or use_NVSS or use_PS:
src_hadec = NP.hstack(((fhd_lst-ctlgobj.location[:,0]).reshape(-1,1), ctlgobj.location[:,1].reshape(-1,1)))
src_altaz = GEOM.hadec2altaz(src_hadec, latitude, units='degrees')
roi_src_altaz = NP.asarray(NP.where(src_altaz[:,0] >= 0.0)).ravel()
roi_pbeam = PB.primary_beam_generator(src_altaz[roi_src_altaz,:], freq, telescope=telescope, skyunits='altaz', freq_scale='Hz', pointing_center=fhd_obsid_pointing_altaz)
overlay['src_ind'] = roi_src_altaz
overlay['pbeam_on_src'] = roi_pbeam.ravel()
# delay_envelope = DLY.delay_envelope(cardinal_bl, dircos[roi_altaz,:])
# overlay['delay_map'][:,roi_altaz] = (DLY.geometric_delay(cardinal_bl, altaz[roi_altaz,:], altaz=True, dircos=False, hadec=False, latitude=latitude)-DLY.geometric_delay(cardinal_bl, fhd_obsid_pointing_altaz, altaz=True, dircos=False, hadec=False, latitude=latitude)).T
# roi_obj_inds += [roi_altaz]
elif backdrop_coords == 'dircos':
havect = fhd_lst - ra_deg
fg_altaz = GEOM.hadec2altaz(NP.hstack((havect.reshape(-1,1),dec_deg.reshape(-1,1))), latitude, units='degrees')
fg_dircos = GEOM.altaz2dircos(fg_altaz, units='degrees')
roi_dircos = NP.asarray(NP.where(fg_dircos[:,2] >= 0.0)).ravel()
overlay['roi_obj_inds'] = roi_dircos
overlay['fg_dircos'] = fg_dircos
if obs_mode == 'track':
pb = PB.primary_beam_generator(xyzvect, freq, telescope=telescope, skyunits='dircos', freq_scale='Hz', pointing_center=fhd_obsid_pointing_dircos)
# pb[pb < 0.5] = NP.nan
overlay['pbeam'] = pb.reshape(backdrop_xsize, backdrop_xsize)
        overlay['delay_map'] = NP.empty((n_bins_baseline_orientation, xyzvect.shape[0]))
        overlay['delay_map'].fill(NP.nan)
mindelay = NP.nanmin(overlay['delay_map'])
maxdelay = NP.nanmax(overlay['delay_map'])
norm_b = PLTC.Normalize(vmin=mindelay, vmax=maxdelay)
fig = PLT.figure(figsize=(10,10))
faxs = []
for i in xrange(n_bins_baseline_orientation):
ax = fig.add_subplot(3,3,blo_ax_mapping[i])
ax.set_xlim(0,bloh[i]-1)
ax.set_ylim(NP.amin(fhd_delays*1e6), NP.amax(fhd_delays*1e6))
ax.set_title(r'{0:+.1f} <= $\theta_b [deg]$ < {1:+.1f}'.format(bloe[i], bloe[(i)+1]), weight='medium')
ax.set_ylabel(r'lag [$\mu$s]', fontsize=18)
blind = blori[blori[i]:blori[i+1]]
sortind = NP.argsort(bl_length[blind], kind='heapsort')
imdspec = ax.imshow(NP.abs(fhd_vis_lag_noisy[blind[sortind],:,0].T), origin='lower', extent=(0, blind.size-1, NP.amin(fhd_delays*1e6), NP.amax(fhd_delays*1e6)), norm=PLTC.LogNorm(vmin=1e5, vmax=5e10), interpolation=None)
# norm=PLTC.LogNorm(vmin=1e-1, vmax=NP.amax(NP.abs(fhd_vis_lag_noisy))),
l = ax.plot([], [], 'k:', [], [], 'k:', [], [], 'k--', [], [], 'k--')
ax.set_aspect('auto')
faxs += [ax]
ax = fig.add_subplot(3,3,blo_ax_mapping[i+n_bins_baseline_orientation])
if backdrop_coords == 'radec':
ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium')
ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium')
elif backdrop_coords == 'dircos':
ax.set_xlabel('l')
ax.set_ylabel('m')
imdmap = ax.imshow(1e6 * OPS.reverse(overlay['delay_map'][i,:].reshape(-1,backdrop_xsize), axis=1), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)))
# PDB.set_trace()
imdmappbc = ax.contour(xgrid[0,:], ygrid[:,0], overlay['pbeam'].reshape(-1,backdrop_xsize), levels=[0.0078125, 0.03125, 0.125, 0.5], colors='k')
# imdmap.set_clim(mindelay, maxdelay)
ax.set_title(r'$\theta_b$ = {0:+.1f} [deg]'.format(cardinal_blo.ravel()[i]), fontsize=18, weight='medium')
ax.grid(True)
ax.tick_params(which='major', length=12, labelsize=12)
ax.tick_params(which='minor', length=6)
ax.locator_params(axis='x', nbins=5)
faxs += [ax]
cbmnt = NP.amin(NP.abs(fhd_vis_lag_noisy))
cbmxt = NP.amax(NP.abs(fhd_vis_lag_noisy))
cbaxt = fig.add_axes([0.1, 0.95, 0.8, 0.02])
cbart = fig.colorbar(imdspec, cax=cbaxt, orientation='horizontal')
cbaxt.set_xlabel('Jy', labelpad=-50, fontsize=18)
# cbmnb = NP.nanmin(overlays[0]['delay_map']) * 1e6
# cbmxb = NP.nanmax(overlays[0]['delay_map']) * 1e6
# cbmnb = mindelay * 1e6
# cbmxb = maxdelay * 1e6
cbaxb = fig.add_axes([0.1, 0.06, 0.8, 0.02])
cbarb = fig.colorbar(imdmap, cax=cbaxb, orientation='horizontal', norm=norm_b)
cbaxb.set_xlabel(r'x (bl/100) $\mu$s', labelpad=-45, fontsize=18)
ax = fig.add_subplot(3,3,5)
# imsky1 = ax.imshow(backdrop, origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)))
impbc = ax.contour(xgrid[0,:], ygrid[:,0], overlay['pbeam'].reshape(-1,backdrop_xsize), levels=[0.0078125, 0.03125, 0.125, 0.5], colors='k')
if use_CSM or use_NVSS or use_SUMSS or use_PS:
imsky2 = ax.scatter(ra_deg_wrapped[overlay['src_ind']].ravel(), dec_deg[overlay['src_ind']].ravel(), c=overlay['pbeam_on_src']*fluxes[overlay['src_ind']], norm=PLTC.LogNorm(vmin=1e-3, vmax=1.0), cmap=CMAP.jet, edgecolor='none', s=10)
else:
imsky2 = ax.imshow(OPS.reverse((overlay['pbeam']*overlay['backdrop']).reshape(-1,backdrop_xsize), axis=1), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), alpha=0.85, norm=PLTC.LogNorm(vmin=1e-2, vmax=1e2))
ax.set_xlim(xvect.max(), xvect.min())
ax.set_ylim(yvect.min(), yvect.max())
ax.set_title('Foregrounds', fontsize=18, weight='medium')
ax.grid(True)
ax.set_aspect('equal')
ax.tick_params(which='major', length=12, labelsize=12)
ax.tick_params(which='minor', length=6)
if backdrop_coords == 'radec':
ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium')
ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium')
elif backdrop_coords == 'dircos':
ax.set_xlabel('l')
ax.set_ylabel('m')
ax.locator_params(axis='x', nbins=5)
cbmnc = NP.nanmin(overlay['pbeam']*overlay['backdrop'])
cbmxc = NP.nanmax(overlay['pbeam']*overlay['backdrop'])
cbaxc = fig.add_axes([0.4, 0.35, 0.25, 0.02])
# cbarc = fig.colorbar(ax.images[1], cax=cbaxc, orientation='horizontal')
cbarc = fig.colorbar(imsky2, cax=cbaxc, orientation='horizontal')
if use_GSM or use_DSM:
cbaxc.set_xlabel('Temperature [K]', labelpad=-50, fontsize=18, weight='medium')
else:
cbaxc.set_xlabel('Flux Density [Jy]', labelpad=-50, fontsize=18, weight='medium')
# tick_locator = ticker.MaxNLocator(nbins=21)
# cbarc.locator = tick_locator
# cbarc.update_ticks()
faxs += [ax]
tpc = faxs[-1].text(0.5, 1.25, r' $\alpha$ = {0[0]:+.3f} deg, $\delta$ = {0[1]:+.2f} deg'.format(fhd_obsid_pointing_radec.ravel()) + '\nLST = {0:.2f} hrs'.format(fhd_lst), transform=ax.transAxes, fontsize=14, weight='medium', ha='center')
PLT.tight_layout()
fig.subplots_adjust(bottom=0.1)
fig.subplots_adjust(top=0.9)
PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/'+telescope_str+'fhd_multi_baseline_CLEAN_visibilities_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'{0:.1f}_MHz_{1:.1f}_MHz_'.format(freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'_snapshot_{0:0d}.eps'.format(fhd_obsid[j]), bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/'+telescope_str+'fhd_multi_baseline_CLEAN_visibilities_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'{0:.1f}_MHz_{1:.1f}_MHz_'.format(freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'_snapshot_{0:0d}.png'.format(fhd_obsid[j]), bbox_inches=0)
| mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/axes_grid/demo_axes_hbox_divider.py | 8 | 1550 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.axes_divider import HBoxDivider
import mpl_toolkits.axes_grid1.axes_size as Size
def make_heights_equal(fig, rect, ax1, ax2, pad):
# pad in inches
h1, v1 = Size.AxesX(ax1), Size.AxesY(ax1)
h2, v2 = Size.AxesX(ax2), Size.AxesY(ax2)
pad_v = Size.Scaled(1)
pad_h = Size.Fixed(pad)
my_divider = HBoxDivider(fig, rect,
horizontal=[h1, pad_h, h2],
vertical=[v1, pad_v, v2])
ax1.set_axes_locator(my_divider.new_locator(0))
ax2.set_axes_locator(my_divider.new_locator(2))
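    # Both axes now take their positions from the same divider, so their
    # heights come out equal while each keeps its own aspect ratio.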
if __name__ == "__main__":
fig1 = plt.figure()
arr1 = np.arange(20).reshape((4,5))
arr2 = np.arange(20).reshape((5,4))
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
ax1.imshow(arr1, interpolation="nearest")
ax2.imshow(arr2, interpolation="nearest")
rect = 111 # subplot param for combined axes
make_heights_equal(fig1, rect, ax1, ax2, pad=0.5) # pad in inches
for ax in [ax1, ax2]:
ax.locator_params(nbins=4)
# annotate
ax3 = plt.axes([0.5, 0.5, 0.001, 0.001], frameon=False)
ax3.xaxis.set_visible(False)
ax3.yaxis.set_visible(False)
    ax3.annotate("Locations of the two axes are adjusted\n so that they have equal heights\n while maintaining their aspect ratios", (0.5, 0.5),
xycoords="axes fraction", va="center", ha="center",
bbox=dict(boxstyle="round, pad=1", fc="w"))
plt.show()
| mit |
PatrickChrist/scikit-learn | sklearn/neighbors/regression.py | 100 | 11017 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
        neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
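        # Uniform weights reduce to a plain mean over the k neighbors;
        # otherwise each neighbor's target is weighted (e.g. by inverse
        # distance) and normalized by the sum of weights.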
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
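        # Neighborhoods can have different sizes here, so the (weighted)
        # average is taken per query point over whatever falls inside the
        # radius.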
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
ngoix/OCRF | sklearn/decomposition/tests/test_truncated_svd.py | 73 | 6086 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
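# Sub-linear tf scaling (1 + log tf) on the non-zero counts, roughly what
# TfidfTransformer(sublinear_tf=True) would do.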
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
saketkc/statsmodels | statsmodels/discrete/tests/test_discrete.py | 19 | 55886 | """
Tests for discrete models
Notes
-----
DECIMAL_3 is used because it seems that there is a loss of precision
in the Stata *.dta -> *.csv output, NOT the estimator for the Poisson
tests.
"""
# pylint: disable-msg=E1101
from statsmodels.compat.python import range
import os
import numpy as np
from numpy.testing import (assert_, assert_raises, assert_almost_equal,
assert_equal, assert_array_equal, assert_allclose,
assert_array_less)
from statsmodels.discrete.discrete_model import (Logit, Probit, MNLogit,
Poisson, NegativeBinomial)
from statsmodels.discrete.discrete_margins import _iscount, _isdummy
import statsmodels.api as sm
import statsmodels.formula.api as smf
from nose import SkipTest
from .results.results_discrete import Spector, DiscreteL1, RandHIE, Anes
from statsmodels.tools.sm_exceptions import PerfectSeparationError
try:
import cvxopt
has_cvxopt = True
except ImportError:
has_cvxopt = False
try:
from scipy.optimize import basinhopping
has_basinhopping = True
except ImportError:
has_basinhopping = False
DECIMAL_14 = 14
DECIMAL_10 = 10
DECIMAL_9 = 9
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
class CheckModelResults(object):
"""
res2 should be the test results from RModelWrap
or the results as defined in model_results_data
"""
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
assert_allclose(self.res1.conf_int(), self.res2.conf_int, rtol=8e-5)
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_4)
def pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
# def test_cov_params(self):
# assert_almost_equal(self.res1.cov_params(), self.res2.cov_params,
# DECIMAL_4)
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)
def test_llnull(self):
assert_almost_equal(self.res1.llnull, self.res2.llnull, DECIMAL_4)
def test_llr(self):
assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_3)
def test_llr_pvalue(self):
assert_almost_equal(self.res1.llr_pvalue, self.res2.llr_pvalue,
DECIMAL_4)
def test_normalized_cov_params(self):
pass
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_dof(self):
assert_equal(self.res1.df_model, self.res2.df_model)
assert_equal(self.res1.df_resid, self.res2.df_resid)
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.model.predict(self.res1.params),
self.res2.phat, DECIMAL_4)
def test_predict_xb(self):
assert_almost_equal(self.res1.model.predict(self.res1.params,
linear=True),
self.res2.yhat, DECIMAL_4)
def test_loglikeobs(self):
#basic cross check
llobssum = self.res1.model.loglikeobs(self.res1.params).sum()
assert_almost_equal(llobssum, self.res1.llf, DECIMAL_14)
def test_jac(self):
#basic cross check
jacsum = self.res1.model.score_obs(self.res1.params).sum(0)
score = self.res1.model.score(self.res1.params)
assert_almost_equal(jacsum, score, DECIMAL_9) #Poisson has low precision ?
class CheckBinaryResults(CheckModelResults):
def test_pred_table(self):
assert_array_equal(self.res1.pred_table(), self.res2.pred_table)
def test_resid_dev(self):
assert_almost_equal(self.res1.resid_dev, self.res2.resid_dev,
DECIMAL_4)
def test_resid_generalized(self):
assert_almost_equal(self.res1.resid_generalized,
self.res2.resid_generalized, DECIMAL_4)
def smoke_test_resid_response(self):
self.res1.resid_response
class CheckMargEff(object):
"""
Test marginal effects (margeff) and its options
"""
def test_nodummy_dydxoverall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydx_se, DECIMAL_4)
def test_nodummy_dydxmean(self):
me = self.res1.get_margeff(at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxmean_se, DECIMAL_4)
def test_nodummy_dydxmedian(self):
me = self.res1.get_margeff(at='median')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxmedian_se, DECIMAL_4)
def test_nodummy_dydxzero(self):
me = self.res1.get_margeff(at='zero')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxzero, DECIMAL_4)
def test_nodummy_dyexoverall(self):
me = self.res1.get_margeff(method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyex, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyex_se, DECIMAL_4)
def test_nodummy_dyexmean(self):
me = self.res1.get_margeff(at='mean', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexmean_se, DECIMAL_4)
def test_nodummy_dyexmedian(self):
me = self.res1.get_margeff(at='median', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexmedian_se, DECIMAL_4)
def test_nodummy_dyexzero(self):
me = self.res1.get_margeff(at='zero', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexzero_se, DECIMAL_4)
def test_nodummy_eydxoverall(self):
me = self.res1.get_margeff(method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydx_se, DECIMAL_4)
def test_nodummy_eydxmean(self):
me = self.res1.get_margeff(at='mean', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxmean_se, DECIMAL_4)
def test_nodummy_eydxmedian(self):
me = self.res1.get_margeff(at='median', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxmedian_se, DECIMAL_4)
def test_nodummy_eydxzero(self):
me = self.res1.get_margeff(at='zero', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxzero_se, DECIMAL_4)
def test_nodummy_eyexoverall(self):
me = self.res1.get_margeff(method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyex, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyex_se, DECIMAL_4)
def test_nodummy_eyexmean(self):
me = self.res1.get_margeff(at='mean', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexmean_se, DECIMAL_4)
def test_nodummy_eyexmedian(self):
me = self.res1.get_margeff(at='median', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexmedian_se, DECIMAL_4)
def test_nodummy_eyexzero(self):
me = self.res1.get_margeff(at='zero', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexzero_se, DECIMAL_4)
def test_dummy_dydxoverall(self):
me = self.res1.get_margeff(dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_dydx_se, DECIMAL_4)
def test_dummy_dydxmean(self):
me = self.res1.get_margeff(at='mean', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_dydxmean_se, DECIMAL_4)
def test_dummy_eydxoverall(self):
me = self.res1.get_margeff(method='eydx', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_eydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_eydx_se, DECIMAL_4)
def test_dummy_eydxmean(self):
me = self.res1.get_margeff(at='mean', method='eydx', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_eydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_eydxmean_se, DECIMAL_4)
def test_count_dydxoverall(self):
me = self.res1.get_margeff(count=True)
assert_almost_equal(me.margeff,
self.res2.margeff_count_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dydx_se, DECIMAL_4)
def test_count_dydxmean(self):
me = self.res1.get_margeff(count=True, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_count_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dydxmean_se, DECIMAL_4)
def test_count_dummy_dydxoverall(self):
me = self.res1.get_margeff(count=True, dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_count_dummy_dydxoverall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dummy_dydxoverall_se, DECIMAL_4)
def test_count_dummy_dydxmean(self):
me = self.res1.get_margeff(count=True, dummy=True, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_count_dummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dummy_dydxmean_se, DECIMAL_4)
class TestProbitNewton(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Probit(data.endog, data.exog).fit(method="newton", disp=0)
res2 = Spector()
res2.probit()
cls.res2 = res2
#def test_predict(self):
# assert_almost_equal(self.res1.model.predict(self.res1.params),
# self.res2.predict, DECIMAL_4)
class TestProbitBFGS(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Probit(data.endog, data.exog).fit(method="bfgs",
disp=0)
res2 = Spector()
res2.probit()
cls.res2 = res2
class TestProbitNM(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="nm",
disp=0, maxiter=500)
class TestProbitPowell(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="powell",
disp=0, ftol=1e-8)
class TestProbitCG(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
# fmin_cg fails to converge on some machines - reparameterize
from statsmodels.tools.transform_model import StandardizeTransform
transf = StandardizeTransform(data.exog)
exog_st = transf(data.exog)
res1_st = Probit(data.endog,
exog_st).fit(method="cg", disp=0, maxiter=1000,
gtol=1e-08)
start_params = transf.transform_params(res1_st.params)
assert_allclose(start_params, res2.params, rtol=1e-5, atol=1e-6)
cls.res1 = Probit(data.endog,
data.exog).fit(start_params=start_params,
method="cg", maxiter=1000,
gtol=1e-05, disp=0)
assert_array_less(cls.res1.mle_retvals['fcalls'], 100)
class TestProbitNCG(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="ncg",
disp=0, avextol=1e-8,
warn_convergence=False)
# converges close enough but warnflag is 2 for precision loss
class TestProbitBasinhopping(CheckBinaryResults):
@classmethod
def setupClass(cls):
if not has_basinhopping:
raise SkipTest("Skipped TestProbitBasinhopping since"
" basinhopping solver is not available")
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
fit = Probit(data.endog, data.exog).fit
cls.res1 = fit(method="basinhopping", disp=0, niter=5,
minimizer={'method' : 'L-BFGS-B', 'tol' : 1e-8})
class CheckLikelihoodModelL1(object):
"""
For testing results generated with L1 regularization
"""
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(
self.res1.conf_int(), self.res2.conf_int, DECIMAL_4)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_nnz_params(self):
assert_almost_equal(
self.res1.nnz_params, self.res2.nnz_params, DECIMAL_4)
def test_aic(self):
assert_almost_equal(
self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(
self.res1.bic, self.res2.bic, DECIMAL_3)
class TestProbitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0.1, 0.2, 0.3, 10]) #/ data.exog.shape[0]
cls.res1 = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='auto',
auto_trim_tol=0.02, acc=1e-10, maxiter=1000)
res2 = DiscreteL1()
res2.probit()
cls.res2 = res2
def test_cov_params(self):
assert_almost_equal(
self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestMNLogitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
alpha = 10. * np.ones((mlogit_mod.J - 1, mlogit_mod.K)) #/ anes_exog.shape[0]
alpha[-1,:] = 0
cls.res1 = mlogit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,
acc=1e-10, disp=0)
res2 = DiscreteL1()
res2.mnlogit()
cls.res2 = res2
class TestLogitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.alpha = 3 * np.array([0., 1., 1., 1.]) #/ data.exog.shape[0]
cls.res1 = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=cls.alpha, disp=0, trim_mode='size',
size_trim_tol=1e-5, acc=1e-10, maxiter=1000)
res2 = DiscreteL1()
res2.logit()
cls.res2 = res2
def test_cov_params(self):
assert_almost_equal(
self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestCVXOPT(object):
@classmethod
def setupClass(self):
self.data = sm.datasets.spector.load()
self.data.exog = sm.add_constant(self.data.exog, prepend=True)
def test_cvxopt_versus_slsqp(self):
        # Compares results from cvxopt to the standard slsqp
if has_cvxopt:
self.alpha = 3. * np.array([0, 1, 1, 1.]) #/ self.data.endog.shape[0]
res_slsqp = Logit(self.data.endog, self.data.exog).fit_regularized(
method="l1", alpha=self.alpha, disp=0, acc=1e-10, maxiter=1000,
trim_mode='auto')
res_cvxopt = Logit(self.data.endog, self.data.exog).fit_regularized(
method="l1_cvxopt_cp", alpha=self.alpha, disp=0, abstol=1e-10,
trim_mode='auto', auto_trim_tol=0.01, maxiter=1000)
assert_almost_equal(res_slsqp.params, res_cvxopt.params, DECIMAL_4)
else:
raise SkipTest("Skipped test_cvxopt since cvxopt is not available")
class TestSweepAlphaL1(object):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.model = Logit(data.endog, data.exog)
cls.alphas = np.array(
[[0.1, 0.1, 0.1, 0.1],
[0.4, 0.4, 0.5, 0.5],
[0.5, 0.5, 1, 1]]) #/ data.exog.shape[0]
cls.res1 = DiscreteL1()
cls.res1.sweep()
def test_sweep_alpha(self):
for i in range(3):
alpha = self.alphas[i, :]
res2 = self.model.fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-10,
trim_mode='off', maxiter=1000)
assert_almost_equal(res2.params, self.res1.params[i], DECIMAL_4)
class CheckL1Compatability(object):
"""
    Tests compatibility between l1 and unregularized by setting alpha such
that certain parameters should be effectively unregularized, and others
should be ignored by the model.
"""
def test_params(self):
m = self.m
assert_almost_equal(
self.res_unreg.params[:m], self.res_reg.params[:m], DECIMAL_4)
# The last entry should be close to zero
# handle extra parameter of NegativeBinomial
kvars = self.res_reg.model.exog.shape[1]
assert_almost_equal(0, self.res_reg.params[m:kvars], DECIMAL_4)
def test_cov_params(self):
m = self.m
# The restricted cov_params should be equal
assert_almost_equal(
self.res_unreg.cov_params()[:m, :m],
self.res_reg.cov_params()[:m, :m],
DECIMAL_1)
def test_df(self):
assert_equal(self.res_unreg.df_model, self.res_reg.df_model)
assert_equal(self.res_unreg.df_resid, self.res_reg.df_resid)
def test_t_test(self):
m = self.m
kvars = self.kvars
# handle extra parameter of NegativeBinomial
extra = getattr(self, 'k_extra', 0)
t_unreg = self.res_unreg.t_test(np.eye(len(self.res_unreg.params)))
t_reg = self.res_reg.t_test(np.eye(kvars + extra))
assert_almost_equal(t_unreg.effect[:m], t_reg.effect[:m], DECIMAL_3)
assert_almost_equal(t_unreg.sd[:m], t_reg.sd[:m], DECIMAL_3)
assert_almost_equal(np.nan, t_reg.sd[m])
assert_allclose(t_unreg.tvalue[:m], t_reg.tvalue[:m], atol=3e-3)
assert_almost_equal(np.nan, t_reg.tvalue[m])
def test_f_test(self):
m = self.m
kvars = self.kvars
# handle extra parameter of NegativeBinomial
extra = getattr(self, 'k_extra', 0)
f_unreg = self.res_unreg.f_test(np.eye(len(self.res_unreg.params))[:m])
f_reg = self.res_reg.f_test(np.eye(kvars + extra)[:m])
assert_allclose(f_unreg.fvalue, f_reg.fvalue, rtol=3e-5, atol=1e-3)
assert_almost_equal(f_unreg.pvalue, f_reg.pvalue, DECIMAL_3)
def test_bad_r_matrix(self):
kvars = self.kvars
assert_raises(ValueError, self.res_reg.f_test, np.eye(kvars) )
class TestPoissonL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog = sm.add_constant(rand_exog, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.Poisson(rand_data.endog, exog_no_PSI)
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)
alpha[:cls.m] = 0
cls.res_reg = sm.Poisson(rand_data.endog, rand_exog).fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
class TestNegativeBinomialL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog_st = (rand_exog - rand_exog.mean(0)) / rand_exog.std(0)
rand_exog = sm.add_constant(rand_exog_st, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI)
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars + 1)
alpha[:cls.m] = 0
alpha[-1] = 0 # don't penalize alpha
mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog)
cls.res_reg = mod_reg.fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
cls.k_extra = 1 # 1 extra parameter in nb2
class TestNegativeBinomialGeoL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog = sm.add_constant(rand_exog, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI,
loglike_method='geometric')
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
# Do a regularized fit with alpha, effectively dropping the last columns
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)
alpha[:cls.m] = 0
mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog,
loglike_method='geometric')
cls.res_reg = mod_reg.fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
assert_equal(mod_reg.loglike_method, 'geometric')
class TestLogitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = np.array([0, 0, 0, 10])
cls.res_reg = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = Logit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class TestMNLogitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0, 0, 0, 10])
cls.res_reg = MNLogit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = MNLogit(data.endog, exog_no_PSI).fit(
disp=0, tol=1e-15, method='bfgs', maxiter=1000)
def test_t_test(self):
m = self.m
kvars = self.kvars
t_unreg = self.res_unreg.t_test(np.eye(m))
t_reg = self.res_reg.t_test(np.eye(kvars))
assert_almost_equal(t_unreg.effect, t_reg.effect[:m], DECIMAL_3)
assert_almost_equal(t_unreg.sd, t_reg.sd[:m], DECIMAL_3)
assert_almost_equal(np.nan, t_reg.sd[m])
assert_almost_equal(t_unreg.tvalue, t_reg.tvalue[:m, :m], DECIMAL_3)
def test_f_test(self):
raise SkipTest("Skipped test_f_test for MNLogit")
class TestProbitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0, 0, 0, 10])
cls.res_reg = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = Probit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class CompareL1(object):
"""
For checking results for l1 regularization.
Assumes self.res1 and self.res2 are two legitimate models to be compared.
"""
def test_basic_results(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
assert_almost_equal(self.res1.cov_params(), self.res2.cov_params(), DECIMAL_4)
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int(), DECIMAL_4)
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
assert_almost_equal(self.res1.pred_table(), self.res2.pred_table(), DECIMAL_4)
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_4)
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_4)
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
class CompareL11D(CompareL1):
"""
Check t and f tests. This only works for 1-d results
"""
def test_tests(self):
restrictmat = np.eye(len(self.res1.params.ravel()))
assert_almost_equal(self.res1.t_test(restrictmat).pvalue,
self.res2.t_test(restrictmat).pvalue, DECIMAL_4)
assert_almost_equal(self.res1.f_test(restrictmat).pvalue,
self.res2.f_test(restrictmat).pvalue, DECIMAL_4)
class TestL1AlphaZeroLogit(CompareL11D):
"""
Compares l1 model with alpha = 0 to the unregularized model.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.res1 = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = Logit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroProbit(CompareL11D):
"""
Compares l1 model with alpha = 0 to the unregularized model.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.res1 = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = Probit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroMNLogit(CompareL1):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = MNLogit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = MNLogit(data.endog, data.exog).fit(disp=0, tol=1e-15,
method='bfgs',
maxiter=1000)
class TestLogitNewton(CheckBinaryResults, CheckMargEff):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Logit(data.endog, data.exog).fit(method="newton", disp=0)
res2 = Spector()
res2.logit()
cls.res2 = res2
def test_resid_pearson(self):
assert_almost_equal(self.res1.resid_pearson,
self.res2.resid_pearson, 5)
def test_nodummy_exog1(self):
me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.})
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_atexog1, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_atexog1_se, DECIMAL_4)
def test_nodummy_exog2(self):
me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_atexog2, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_atexog2_se, DECIMAL_4)
def test_dummy_exog1(self):
me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.}, dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_atexog1, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_atexog1_se, DECIMAL_4)
def test_dummy_exog2(self):
me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean',
dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_atexog2, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_atexog2_se, DECIMAL_4)
class TestLogitBFGS(CheckBinaryResults, CheckMargEff):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.logit()
cls.res2 = res2
cls.res1 = Logit(data.endog, data.exog).fit(method="bfgs", disp=0)
class TestPoissonNewton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Poisson(data.endog, exog).fit(method='newton', disp=0)
res2 = RandHIE()
res2.poisson()
cls.res2 = res2
def test_margeff_overall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_overall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_overall_se, DECIMAL_4)
def test_margeff_dummy_overall(self):
me = self.res1.get_margeff(dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_overall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_overall_se, DECIMAL_4)
def test_resid(self):
assert_almost_equal(self.res1.resid, self.res2.resid, 2)
def test_predict_prob(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
probs_res = np.loadtxt(os.path.join(cur_dir, "results",
"predict_prob_poisson.csv"), delimiter=",")
# just check the first 100 obs. vs R to save memory
probs = self.res1.predict_prob()[:100]
assert_almost_equal(probs, probs_res, 8)
class TestNegativeBinomialNB2Newton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(method='newton', disp=0)
res2 = RandHIE()
res2.negativebinomial_nb2_bfgs()
cls.res2 = res2
def test_jac(self):
pass
    # NOTE: The bse is much closer in precision to Stata
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_alpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
DECIMAL_4)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
DECIMAL_2)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10],
np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def no_info(self):
pass
test_jac = no_info
class TestNegativeBinomialNB1Newton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(
method="newton",
maxiter=100,
disp=0)
res2 = RandHIE()
res2.negativebinomial_nb1_bfgs()
cls.res2 = res2
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def test_lnalpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
# the bse for alpha is not high precision from the hessian
# approximation
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_2)
def test_jac(self):
pass
def test_predict(self):
pass
def test_predict_xb(self):
pass
class TestNegativeBinomialNB2BFGS(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(
method='bfgs', disp=0,
maxiter=1000)
res2 = RandHIE()
res2.negativebinomial_nb2_bfgs()
cls.res2 = res2
def test_jac(self):
pass
    # NOTE: The bse is much closer in precision to Stata
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_alpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
DECIMAL_4)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
DECIMAL_2)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10],
np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def no_info(self):
pass
test_jac = no_info
class TestNegativeBinomialNB1BFGS(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(method="bfgs",
maxiter=100,
disp=0)
res2 = RandHIE()
res2.negativebinomial_nb1_bfgs()
cls.res2 = res2
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def test_lnalpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
# the bse for alpha is not high precision from the hessian
# approximation
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_2)
def test_jac(self):
pass
def test_predict(self):
pass
def test_predict_xb(self):
pass
class TestNegativeBinomialGeometricBFGS(CheckModelResults):
"""
Cannot find another implementation of the geometric to cross-check results
we only test fitted values because geometric has fewer parameters than nb1 and nb2
and we want to make sure that predict() np.dot(exog, params) works
"""
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'geometric').fit(method='bfgs', disp=0)
res2 = RandHIE()
res2.negativebinomial_geometric_bfgs()
cls.res2 = res2
# the following are regression tests, could be inherited instead
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int, DECIMAL_3)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10], self.res2.fittedvalues[:10], DECIMAL_3)
def test_jac(self):
pass
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10], np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10], self.res2.fittedvalues[:10], DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def no_info(self):
pass
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_1)
def test_llr(self):
assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_2)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
test_jac = no_info
class CheckMNLogitBaseZero(CheckModelResults):
def test_margeff_overall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff, self.res2.margeff_dydx_overall, 6)
assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_overall_se, 6)
def test_margeff_mean(self):
me = self.res1.get_margeff(at='mean')
assert_almost_equal(me.margeff, self.res2.margeff_dydx_mean, 7)
assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_mean_se, 7)
def test_margeff_dummy(self):
data = self.data
vote = data.data['vote']
exog = np.column_stack((data.exog, vote))
exog = sm.add_constant(exog, prepend=False)
res = MNLogit(data.endog, exog).fit(method="newton", disp=0)
me = res.get_margeff(dummy=True)
assert_almost_equal(me.margeff, self.res2.margeff_dydx_dummy_overall,
6)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dydx_dummy_overall_se, 6)
me = res.get_margeff(dummy=True, method="eydx")
assert_almost_equal(me.margeff, self.res2.margeff_eydx_dummy_overall,
5)
assert_almost_equal(me.margeff_se,
self.res2.margeff_eydx_dummy_overall_se, 6)
def test_j(self):
assert_equal(self.res1.model.J, self.res2.J)
def test_k(self):
assert_equal(self.res1.model.K, self.res2.K)
def test_endog_names(self):
assert_equal(self.res1._get_endog_name(None,None)[1],
['y=1', 'y=2', 'y=3', 'y=4', 'y=5', 'y=6'])
def test_pred_table(self):
# fitted results taken from gretl
pred = [6, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 6, 0, 1, 6, 0, 0,
1, 1, 6, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 6, 0, 0, 6, 6, 0, 0, 1,
1, 6, 1, 6, 0, 0, 0, 1, 0, 1, 0, 0, 0, 6, 0, 0, 6, 0, 0, 0, 1,
1, 0, 0, 6, 6, 6, 6, 1, 0, 5, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,
6, 0, 6, 6, 1, 0, 1, 1, 6, 5, 1, 0, 0, 0, 5, 0, 0, 6, 0, 1, 0,
0, 0, 0, 0, 1, 1, 0, 6, 6, 6, 6, 5, 0, 1, 1, 0, 1, 0, 6, 6, 0,
0, 0, 6, 0, 0, 0, 6, 6, 0, 5, 1, 0, 0, 0, 0, 6, 0, 5, 6, 6, 0,
0, 0, 0, 6, 1, 0, 0, 1, 0, 1, 6, 1, 1, 1, 1, 1, 0, 0, 0, 6, 0,
5, 1, 0, 6, 6, 6, 0, 0, 0, 0, 1, 6, 6, 0, 0, 0, 1, 1, 5, 6, 0,
6, 1, 0, 0, 1, 6, 0, 0, 1, 0, 6, 6, 0, 5, 6, 6, 0, 0, 6, 1, 0,
6, 0, 1, 0, 1, 6, 0, 1, 1, 1, 6, 0, 5, 0, 0, 6, 1, 0, 6, 5, 5,
0, 6, 1, 1, 1, 0, 0, 6, 0, 0, 5, 0, 0, 6, 6, 6, 6, 6, 0, 1, 0,
0, 6, 6, 0, 0, 1, 6, 0, 0, 6, 1, 6, 1, 1, 1, 0, 1, 6, 5, 0, 0,
1, 5, 0, 1, 6, 6, 1, 0, 0, 1, 6, 1, 5, 6, 1, 0, 0, 1, 1, 0, 6,
1, 6, 0, 1, 1, 5, 6, 6, 5, 1, 1, 1, 0, 6, 1, 6, 1, 0, 1, 0, 0,
1, 5, 0, 1, 1, 0, 5, 6, 0, 5, 1, 1, 6, 5, 0, 6, 0, 0, 0, 0, 0,
0, 1, 6, 1, 0, 5, 1, 0, 0, 1, 6, 0, 0, 6, 6, 6, 0, 2, 1, 6, 5,
6, 1, 1, 0, 5, 1, 1, 1, 6, 1, 6, 6, 5, 6, 0, 1, 0, 1, 6, 0, 6,
1, 6, 0, 0, 6, 1, 0, 6, 1, 0, 0, 0, 0, 6, 6, 6, 6, 5, 6, 6, 0,
0, 6, 1, 1, 6, 0, 0, 6, 6, 0, 6, 6, 0, 0, 6, 0, 0, 6, 6, 6, 1,
0, 6, 0, 0, 0, 6, 1, 1, 0, 1, 5, 0, 0, 5, 0, 0, 0, 1, 1, 6, 1,
0, 0, 0, 6, 6, 1, 1, 6, 5, 5, 0, 6, 6, 0, 1, 1, 0, 6, 6, 0, 6,
5, 5, 6, 5, 1, 0, 6, 0, 6, 1, 0, 1, 6, 6, 6, 1, 0, 6, 0, 5, 6,
6, 5, 0, 5, 1, 0, 6, 0, 6, 1, 5, 5, 0, 1, 5, 5, 2, 6, 6, 6, 5,
0, 0, 1, 6, 1, 0, 1, 6, 1, 0, 0, 1, 5, 6, 6, 0, 0, 0, 5, 6, 6,
6, 1, 5, 6, 1, 0, 0, 6, 5, 0, 1, 1, 1, 6, 6, 0, 1, 0, 0, 0, 5,
0, 0, 6, 1, 6, 0, 6, 1, 5, 5, 6, 5, 0, 0, 0, 0, 1, 1, 0, 5, 5,
0, 0, 0, 0, 1, 0, 6, 6, 1, 1, 6, 6, 0, 5, 5, 0, 0, 0, 6, 6, 1,
6, 0, 0, 5, 0, 1, 6, 5, 6, 6, 5, 5, 6, 6, 1, 0, 1, 6, 6, 1, 6,
0, 6, 0, 6, 5, 0, 6, 6, 0, 5, 6, 0, 6, 6, 5, 0, 1, 6, 6, 1, 0,
1, 0, 6, 6, 1, 0, 6, 6, 6, 0, 1, 6, 0, 1, 5, 1, 1, 5, 6, 6, 0,
1, 6, 6, 1, 5, 0, 5, 0, 6, 0, 1, 6, 1, 0, 6, 1, 6, 0, 6, 1, 0,
0, 0, 6, 6, 0, 1, 1, 6, 6, 6, 1, 6, 0, 5, 6, 0, 5, 6, 6, 5, 5,
5, 6, 0, 6, 0, 0, 0, 5, 0, 6, 1, 2, 6, 6, 6, 5, 1, 6, 0, 6, 0,
0, 0, 0, 6, 5, 0, 5, 1, 6, 5, 1, 6, 5, 1, 1, 0, 0, 6, 1, 1, 5,
6, 6, 0, 5, 2, 5, 5, 0, 5, 5, 5, 6, 5, 6, 6, 5, 2, 6, 5, 6, 0,
0, 6, 5, 0, 6, 0, 0, 6, 6, 6, 0, 5, 1, 1, 6, 6, 5, 2, 1, 6, 5,
6, 0, 6, 6, 1, 1, 5, 1, 6, 6, 6, 0, 0, 6, 1, 0, 5, 5, 1, 5, 6,
1, 6, 0, 1, 6, 5, 0, 0, 6, 1, 5, 1, 0, 6, 0, 6, 6, 5, 5, 6, 6,
6, 6, 2, 6, 6, 6, 5, 5, 5, 0, 1, 0, 0, 0, 6, 6, 1, 0, 6, 6, 6,
6, 6, 1, 0, 6, 1, 5, 5, 6, 6, 6, 6, 6, 5, 6, 1, 6, 2, 5, 5, 6,
5, 6, 6, 5, 6, 6, 5, 5, 6, 1, 5, 1, 6, 0, 2, 5, 0, 5, 0, 2, 1,
6, 0, 0, 6, 6, 1, 6, 0, 5, 5, 6, 6, 1, 6, 6, 6, 5, 6, 6, 1, 6,
5, 6, 1, 1, 0, 6, 6, 5, 1, 0, 0, 6, 6, 5, 6, 0, 1, 6, 0, 5, 6,
5, 2, 5, 2, 0, 0, 1, 6, 6, 1, 5, 6, 6, 0, 6, 6, 6, 6, 6, 5]
assert_array_equal(self.res1.predict().argmax(1), pred)
# the rows should add up for pred table
assert_array_equal(self.res1.pred_table().sum(0), np.bincount(pred))
# note this is just a regression test, gretl doesn't have a prediction
# table
pred = [[ 126., 41., 2., 0., 0., 12., 19.],
[ 77., 73., 3., 0., 0., 15., 12.],
[ 37., 43., 2., 0., 0., 19., 7.],
[ 12., 9., 1., 0., 0., 9., 6.],
[ 19., 10., 2., 0., 0., 20., 43.],
[ 22., 25., 1., 0., 0., 31., 71.],
[ 9., 7., 1., 0., 0., 18., 140.]]
assert_array_equal(self.res1.pred_table(), pred)
def test_resid(self):
assert_array_equal(self.res1.resid_misclassified, self.res2.resid)
class TestMNLogitNewtonBaseZero(CheckMNLogitBaseZero):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
cls.data = data
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
cls.res1 = MNLogit(data.endog, exog).fit(method="newton", disp=0)
res2 = Anes()
res2.mnlogit_basezero()
cls.res2 = res2
class TestMNLogitLBFGSBaseZero(CheckMNLogitBaseZero):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
cls.data = data
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
mymodel = MNLogit(data.endog, exog)
cls.res1 = mymodel.fit(method="lbfgs", disp=0, maxiter=50000,
#m=12, pgtol=1e-7, factr=1e3, # 5 failures
#m=20, pgtol=1e-8, factr=1e2, # 3 failures
#m=30, pgtol=1e-9, factr=1e1, # 1 failure
m=40, pgtol=1e-10, factr=5e0,
loglike_and_score=mymodel.loglike_and_score)
res2 = Anes()
res2.mnlogit_basezero()
cls.res2 = res2
def test_perfect_prediction():
cur_dir = os.path.dirname(os.path.abspath(__file__))
iris_dir = os.path.join(cur_dir, '..', '..', 'genmod', 'tests', 'results')
iris_dir = os.path.abspath(iris_dir)
iris = np.genfromtxt(os.path.join(iris_dir, 'iris.csv'), delimiter=",",
skip_header=1)
y = iris[:,-1]
X = iris[:,:-1]
X = X[y != 2]
y = y[y != 2]
X = sm.add_constant(X, prepend=True)
mod = Logit(y,X)
assert_raises(PerfectSeparationError, mod.fit, maxiter=1000)
#turn off raise PerfectSeparationError
mod.raise_on_perfect_prediction = False
# this will raise if you set maxiter high enough with a singular matrix
from pandas.util.testing import assert_produces_warning
# this is not thread-safe
with assert_produces_warning():
mod.fit(disp=False, maxiter=50) # should not raise but does warn
def test_poisson_predict():
#GH: 175, make sure poisson predict works without offset and exposure
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=True)
res = sm.Poisson(data.endog, exog).fit(method='newton', disp=0)
pred1 = res.predict()
pred2 = res.predict(exog)
assert_almost_equal(pred1, pred2)
    # extra options
pred3 = res.predict(exog, offset=0, exposure=1)
assert_almost_equal(pred1, pred3)
pred3 = res.predict(exog, offset=0, exposure=2)
assert_almost_equal(2*pred1, pred3)
pred3 = res.predict(exog, offset=np.log(2), exposure=1)
assert_almost_equal(2*pred1, pred3)
def test_poisson_newton():
#GH: 24, Newton doesn't work well sometimes
nobs = 10000
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x, prepend=True)
y_count = np.random.poisson(np.exp(x.sum(1)))
mod = sm.Poisson(y_count, x)
from pandas.util.testing import assert_produces_warning
# this is not thread-safe
with assert_produces_warning():
res = mod.fit(start_params=-np.ones(4), method='newton', disp=0)
assert_(not res.mle_retvals['converged'])
def test_issue_339():
# make sure MNLogit summary works for J != K.
data = sm.datasets.anes96.load()
exog = data.exog
# leave out last exog column
exog = exog[:,:-1]
exog = sm.add_constant(exog, prepend=True)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
# strip the header from the test
smry = "\n".join(res1.summary().as_text().split('\n')[9:])
cur_dir = os.path.dirname(os.path.abspath(__file__))
test_case_file = os.path.join(cur_dir, 'results', 'mn_logit_summary.txt')
test_case = open(test_case_file, 'r').read()
np.testing.assert_equal(smry, test_case[:-1])
def test_issue_341():
data = sm.datasets.anes96.load()
exog = data.exog
# leave out last exog column
exog = exog[:,:-1]
exog = sm.add_constant(exog, prepend=True)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
x = exog[0]
np.testing.assert_equal(res1.predict(x).shape, (1,7))
np.testing.assert_equal(res1.predict(x[None]).shape, (1,7))
def test_iscount():
X = np.random.random((50, 10))
X[:,2] = np.random.randint(1, 10, size=50)
X[:,6] = np.random.randint(1, 10, size=50)
X[:,4] = np.random.randint(0, 2, size=50)
X[:,1] = np.random.randint(-10, 10, size=50) # not integers
count_ind = _iscount(X)
assert_equal(count_ind, [2, 6])
def test_isdummy():
X = np.random.random((50, 10))
X[:,2] = np.random.randint(1, 10, size=50)
X[:,6] = np.random.randint(0, 2, size=50)
X[:,4] = np.random.randint(0, 2, size=50)
X[:,1] = np.random.randint(-10, 10, size=50) # not integers
count_ind = _isdummy(X)
assert_equal(count_ind, [4, 6])
def test_non_binary():
y = [1, 2, 1, 2, 1, 2]
X = np.random.randn(6, 2)
np.testing.assert_raises(ValueError, Logit, y, X)
def test_mnlogit_factor():
dta = sm.datasets.anes96.load_pandas()
dta['endog'] = dta.endog.replace(dict(zip(range(7), 'ABCDEFG')))
dta.exog['constant'] = 1
mod = sm.MNLogit(dta.endog, dta.exog)
res = mod.fit(disp=0)
# smoke tests
params = res.params
summary = res.summary()
# with patsy
del dta.exog['constant']
mod = smf.mnlogit('PID ~ ' + ' + '.join(dta.exog.columns), dta.data)
res2 = mod.fit(disp=0)
res2.params
summary = res2.summary()
def test_formula_missing_exposure():
# see 2083
import statsmodels.formula.api as smf
import pandas as pd
d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, np.nan],
'constant': [1] * 4, 'exposure' : np.random.uniform(size=4),
'x': [1, 3, 2, 1.5]}
df = pd.DataFrame(d)
# should work
mod1 = smf.poisson('Foo ~ Bar', data=df, exposure=df['exposure'])
assert_(type(mod1.exposure) is np.ndarray, msg='Exposure is not ndarray')
# make sure this raises
exposure = pd.Series(np.random.randn(5))
assert_raises(ValueError, sm.Poisson, df.Foo, df[['constant', 'Bar']],
exposure=exposure)
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'],
exit=False)
| bsd-3-clause |
mjudsp/Tsallis | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
pyspeckit/pyspeckit | pyspeckit/wrappers/fith2co.py | 4 | 6739 | """
===================
H2CO fitter wrapper
===================
Wrapper to fit formaldehyde spectra.
"""
from __future__ import print_function
from .. import spectrum
from ..spectrum import units
import copy
from astropy import units as u
from six import iteritems
title_dict = {'oneone':'H$_2$CO 1$_{11}$-1$_{10}$',
'twotwo':'H$_2$CO 2$_{12}$-2$_{11}$',
'threethree':'H$_2$CO 3$_{23}$-3$_{22}$'
}
def plot_h2co(spdict, spectra, fignum=1, show_components=False,
show_hyperfine_components=False, residfignum=None, annotate=None,
clear=True, residkwargs={}, plot_fit_kwargs={}, residclear=True,
resid_overlay=False, resid_yoffsets=None,
**plotkwargs):
"""
Plot the results from a multi-h2co fit
"""
from matplotlib import pyplot
spectra.plotter.figure = pyplot.figure(fignum)
spectra.plotter.axis = spectra.plotter.figure.gca()
if clear:
spectra.plotter.figure.clf()
splist = spdict.values()
for sp in splist:
sp.xarr.convert_to_unit('km/s',quiet=True)
if hasattr(spectra.specfit,'fitter'):
sp.specfit.fitter = copy.copy(spectra.specfit.fitter)
sp.specfit.modelpars = spectra.specfit.modelpars
sp.specfit.npeaks = spectra.specfit.npeaks
sp.specfit.fitter.npeaks = spectra.specfit.npeaks
if spectra.specfit.modelpars is not None:
mf = sp.specfit.fitter.n_modelfunc
kw = spectra.specfit.fitter.modelfunc_kwargs
sp.specfit.model = mf(pars=spectra.specfit.modelpars,
**kw)(sp.xarr)
if len(splist) == 2:
axdict = {'oneone':pyplot.subplot(211),
'twotwo':pyplot.subplot(212)}
elif len(splist) == 3:
axdict = {'oneone':pyplot.subplot(211),
'twotwo':pyplot.subplot(223),
'threethree':pyplot.subplot(224)}
elif len(splist) == 4:
axdict = {'oneone':pyplot.subplot(221),
'twotwo':pyplot.subplot(222),
'threethree':pyplot.subplot(223),
'fourfour':pyplot.subplot(224)}
for linename,sp in iteritems(spdict):
sp.plotter.axis=axdict[linename] # permanent
sp.plotter(axis=axdict[linename],
title=title_dict[linename],
clear=clear,
**plotkwargs)
sp.specfit.Spectrum.plotter = sp.plotter
#sp.specfit.selectregion(reset=True)
if sp.specfit.modelpars is not None:
sp.specfit.plot_fit(annotate=False,
show_components=show_components,
show_hyperfine_components=show_hyperfine_components,
**plot_fit_kwargs)
sp.plotter.reset_limits()
if spdict['oneone'].specfit.modelpars is not None and annotate:
spdict['oneone'].specfit.annotate(labelspacing=0.05,
prop={'size':'small',
'stretch':'extra-condensed'},
frameon=False)
residaxdict = None
if residfignum is not None:
pyplot.figure(residfignum)
if residclear:
pyplot.clf()
if len(splist) == 2:
residaxdict = {'oneone':pyplot.subplot(211),
'twotwo':pyplot.subplot(212)}
elif len(splist) == 3:
residaxdict = {'oneone':pyplot.subplot(211),
'twotwo':pyplot.subplot(223),
'threethree':pyplot.subplot(224),
'fourfour':pyplot.subplot(224)}
elif len(splist) == 4:
residaxdict = {'oneone':pyplot.subplot(221),
'twotwo':pyplot.subplot(222),
'threethree':pyplot.subplot(223),
'fourfour':pyplot.subplot(224)}
elif resid_overlay:
residaxdict = axdict
residclear = False # override defaults...
residfignum = fignum
if residaxdict is not None:
for linename,sp in iteritems(spdict):
sp.specfit.Spectrum.plotter = sp.plotter
try:
yoffset = resid_yoffsets[linename]
except TypeError:
yoffset = 0.0
sp.specfit.plotresiduals(axis=residaxdict[linename],
figure=residfignum,
clear=residclear,
set_limits=False,
label=False,
yoffset=yoffset,
**residkwargs)
spectra.residaxdict = residaxdict
spectra.axisdict = axdict
spectra.plotter.axis = axdict['oneone']
spectra.specfit.fitleg = spdict['oneone'].specfit.fitleg
def BigSpectrum_to_H2COdict(sp, vrange=None):
"""
A rather complicated way to make the spdicts above given a spectrum...
"""
spdict = {}
for linename,freq in iteritems(spectrum.models.formaldehyde.central_freq_dict):
if vrange is not None:
freq_test_low = freq - freq * vrange[0]/units.speedoflight_kms
freq_test_high = freq - freq * vrange[1]/units.speedoflight_kms
else:
freq_test_low = freq_test_high = freq
if (sp.xarr.as_unit('Hz').in_range(freq_test_low*u.Hz) or
sp.xarr.as_unit('Hz').in_range(freq_test_high*u.Hz)):
spdict[linename] = sp.copy(deep=True)
spdict[linename].xarr.convert_to_unit('GHz')
spdict[linename].xarr.refX = freq
spdict[linename].xarr.refX_unit = 'Hz'
#spdict[linename].baseline = copy.copy(sp.baseline)
#spdict[linename].baseline.Spectrum = spdict[linename]
spdict[linename].specfit = sp.specfit.copy(parent=spdict[linename])
spdict[linename].xarr.convert_to_unit('km/s')
if vrange is not None:
try:
spdict[linename].crop(*vrange, unit='km/s')
except IndexError:
                    # if the freq is in range but there's no data in range, remove the line
spdict.pop(linename)
return spdict
def plotter_override(sp, vrange=None, **kwargs):
"""
Do plot_h2co with syntax similar to plotter()
"""
spdict = BigSpectrum_to_H2COdict(sp, vrange=vrange)
if len(spdict) not in (2,3,4):
raise ValueError("Not enough lines; don't need to use the H2CO plot wrapper")
plot_h2co(spdict, sp, **kwargs)
return spdict
| mit |
smartercleanup/duwamish-api | src/sa_api_v2/management/commands/ingestPrevetTags.py | 3 | 4460 | from __future__ import print_function
from django.core.management.base import BaseCommand
from django.db import transaction
import pandas as pd
import math
from sa_api_v2.models import (
Tag,
PlaceTag,
Place,
DataSet
)
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
TAG_MAPPINGS = {
"Remove-above cost": "above costs",
"Removed-cost above": "above costs",
"Removed-Cost above": "above costs",
"Remove- above cost": "above costs",
"Vetted": "Vetted",
"Vettted": "Vetted",
"vetted": "Vetted",
"Remove-illegal": "illegal",
"Remove-programmatic": "programmatic",
"Remove- programmatic": "programmatic",
"Remove-separate process": "programmatic",
"Remove-incomplete": "incomplete",
"Remove-county function": "county function",
"Remove-private": "private",
}
# 1. Create the tags on our pbdurham dataset
TAGS = [
{
"name": "Removed",
"is_enabled": False,
"children": [
{
"name": "above costs",
"color": "#c9302c"
},
{
"name": "illegal",
"color": "#c9302c"
},
{
"name": "programmatic",
"color": "#c9302c"
},
{
"name": "separate process",
"color": "#c9302c"
},
{
"name": "incomplete",
"color": "#c9302c"
},
{
"name": "county function",
"color": "#c9302c"
},
{
"name": "private",
"color": "#c9302c"
},
{
"name": "not in durham",
"color": "#c9302c"
},
]
},
{
"name": "Vetted",
"color": "#449d44"
}
]
# 2. parse the csv
FILEPATH = "./prevet-tags.csv"
# 3. get the tag name from the "Pre-Vetting Status" column
# find the tag using the tag name and TAG_MAPPINGS
# 4. get the place id from the "Mapseed ID" column
# 5. create a PlaceTag, Tag to the Place model
def create_tags():
dataset = DataSet.objects.get(display_name="pbdurham")
def create_tag(tag, parent):
is_enabled = False if tag.get("is_enabled") is False else True
color = tag.get("color", None)
tagModel = Tag.objects.create(
name=tag["name"],
color=color,
parent=parent,
is_enabled=is_enabled,
dataset=dataset
)
logger.info("creating tag: {}".format(tagModel))
for child in [tag for tag in tag.get('children', [])]:
create_tag(child, tagModel)
for tag in TAGS:
create_tag(tag, None)
def create_place_tags():
df = pd.read_csv(FILEPATH)
ideas_not_vetted = []
for index, row in df.iterrows():
# get the relevant place:
if math.isnan(row['Mapseed ID']):
logger.info("row had invalid id: {}".format(row))
continue
mapseed_id = int(row['Mapseed ID'])
logger.info("parsing mapseed id: {}".format(mapseed_id))
if mapseed_id is None:
import ipdb
ipdb.set_trace()
if type(row['Pre-Vetting Status ']) == float and math.isnan(row['Pre-Vetting Status ']):
logger.info("row has invalid prevet status: {}".format(row))
ideas_not_vetted.append(mapseed_id)
continue
status = row['Pre-Vetting Status '].strip()
tag_name = TAG_MAPPINGS.get(status, None)
logger.info("tag name: {}".format(tag_name))
if tag_name is None:
logger.info("no tag name for place id: {}".format(mapseed_id))
            raise ValueError("no tag mapping for prevet status: {}".format(status))
# get the relevant tag:
tag = Tag.objects.get(name=tag_name)
logger.info("Tag: {}".format(tag))
place = Place.objects.get(id=mapseed_id)
        logger.info("place: {}".format(place))
PlaceTag.objects.create(tag=tag, place=place)
logger.info("ideas not vetted: {}".format(ideas_not_vetted))
class Command(BaseCommand):
help = """
Ingest PlaceTags from a spreadsheet into our app.
"""
def handle(self, *args, **options):
with transaction.atomic():
create_tags()
create_place_tags()
| gpl-3.0 |
ygorcanalli/documenthandler | DocumentHandler/src/compare/metrics.py | 1 | 2935 | '''
Created on Dec 29, 2014
@author: ygor
'''
import numpy as np
from sklearn import metrics
def interpolated_precision_recall_curve(queries_ranking, queries_similarities, relevants):
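    # 11-point interpolated precision-recall, averaged over queries: for each
    # recall level r in {0, 0.1, ..., 1.0} the interpolated precision is
    # max{precision(r') : r' >= r}, and the reported AUC is computed over
    # those 11 points.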
queries_count = np.shape(queries_ranking)[0]
interpolated_precision = np.zeros(11,dtype = np.float128)
for qindex in range(0,queries_count):
tp = 0
precision, recall = [0],[0]
relevants_count = np.shape(np.nonzero(relevants[qindex]))[1]
retrieved_count = 1
for ranki in queries_ranking[qindex]:
if (queries_similarities[qindex][ranki] > 0) and (relevants[qindex][ranki] == 1):
tp += 1
precisioni = tp / retrieved_count
if relevants_count == 0:
recalli = 1
else:
recalli = tp / relevants_count
retrieved_count += 1
precision += [precisioni]
recall += [recalli]
        # query's 11-point interpolated precision: precision_levels[i] is the
        # maximum precision over all points with recall >= i/10
precision_levels = []
for rank in range(0,11):
prec_ati = 0
for j in range(0,len(recall)):
if rank <= recall[j]*10:
prec_ati = max(prec_ati,precision[j])
precision_levels.append(prec_ati)
interpolated_precision[rank] += prec_ati/queries_count
del precision
del recall
auc = float("{0:1.4f}".format(metrics.auc([0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1],interpolated_precision)))
return interpolated_precision, auc
def highest_false_match_and_separation(queries_ranking, queries_similarities, relevants):
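    # Per query: HFM (highest false match) is the similarity of the best-ranked
    # irrelevant document and LTM (lowest true match) that of the worst-ranked
    # relevant document, both expressed as percentages of the top-ranked
    # similarity; SEP = LTM - HFM measures how well relevant documents are
    # separated from irrelevant ones.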
# rows = queries, col[0] = HFM and col[1] = SEP
hfm_sep_matrix = np.zeros((relevants.shape[0],2))
for queryi_index in range(0,relevants.shape[0]):
queryi_ranking = queries_ranking[queryi_index]
queryi_similarities = queries_similarities[queryi_index]
max_similarity = queryi_similarities[queryi_ranking[0]]
lowest_relevant = -1
highest_irrelevant = -1
for j in range(0,relevants.shape[1]):
ranki_pos = queryi_ranking[j]
if (highest_irrelevant == -1 and relevants[queryi_index][ranki_pos] == 0):
highest_irrelevant = ranki_pos
if (relevants[queryi_index][ranki_pos] == 1):
lowest_relevant = ranki_pos
LTM = 100*queryi_similarities[lowest_relevant] / max_similarity
HFM = 100*queryi_similarities[highest_irrelevant] / max_similarity
SEP = LTM - HFM
hfm_sep_matrix[queryi_index][0] = HFM
hfm_sep_matrix[queryi_index][1] = SEP
return hfm_sep_matrix
| mit |
shyamalschandra/scikit-learn | examples/ensemble/plot_forest_importances.py | 168 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
| bsd-3-clause |
aspilotros/YouTube_views_forecasting | Fast_DTW_Clustering.py | 1 | 1820 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 21 14:14:04 2017
@author: Alessandro
"""
#%%
# Fast Version of DTW
import numpy as np
from scipy.spatial.distance import euclidean
from fastdtw import fastdtw
#x=df_views_norm_24m.iloc[0,0:730]
#y=df_views_norm_24m.iloc[1,0:730]
#distance, path = fastdtw(x, y, dist=euclidean)
#print(distance)
def k_means_clust_fast(data,num_clust,num_iter,w=5):
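    # DTW-based k-means: centroids start as a random subset of the series; each
    # iteration assigns every series to the centroid with the smallest fastdtw
    # distance, then replaces each centroid with the element-wise mean of its
    # assigned series. (`w` is kept for API compatibility but is not used by
    # the fastdtw call.)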
centroids = data[np.random.choice(data.shape[0], num_clust, replace=False)]
#centroids=random.sample(data,num_clust)
#centroids=data.sample(num_clust)
counter=0
for n in range(num_iter):
counter+=1
print (counter)
assignments={}
#assign data points to clusters
for ind,i in enumerate(data):
#for ind,i in data.iterrows():
min_dist=float('inf')
closest_clust=None
for c_ind,j in enumerate(centroids):
cur_dist, path = fastdtw(i,j,dist=euclidean)
if cur_dist<min_dist:
min_dist=cur_dist
closest_clust=c_ind
if closest_clust in assignments:
assignments[closest_clust].append(ind)
else:
assignments[closest_clust]=[]
#recalculate centroids of clusters
for key in assignments:
clust_sum=np.array([0])
for k in assignments[key]:
clust_sum=clust_sum+data[k]
#centroids[key]=[m/len(assignments[key]) for m in clust_sum]
centroids[key] = clust_sum/len(assignments[key])
return centroids
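# The usage below assumes df_views_norm_24m (a DataFrame of normalized
# YouTube view-count time series, one video per row) was built in an earlier
# notebook cell; it is not defined in this file.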
train = df_views_norm_24m.iloc[190000:210000,0:30].values
import matplotlib.pylab as plt
centroids=k_means_clust_fast(train,4,10,4)
for i in centroids:
plt.plot(i)
plt.show() | mit |
KarlClinckspoor/SAXS_treatment | WLM models/interactive_fit.py | 1 | 7777 | from SAXS_FF import WLM_whole_q
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import Slider, Button, CheckButtons
import os
import lmfit
def plot_fit():
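    # Loads the SAXS curve from SAXS_test.dat, plots it together with the
    # WLM_whole_q model evaluated at an initial parameter set, and attaches one
    # slider per model parameter so the model curve is recomputed and redrawn
    # whenever a slider changes.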
data_x = []
data_y = []
data_y_err = []
datafile = 'SAXS_test.dat'
datapath = os.path.join(os.getcwd(), datafile)
with open(datapath, 'r') as fhand:
counter = 0
for line in fhand:
try:
temp_x, temp_y, temp_err = line.rstrip().split(' ')
data_x.append(float(temp_x)/10)
data_y.append(float(temp_y))
data_y_err.append(float(temp_err))
except:
pass
#print('Line invalid')
# plt.errorbar(data_x, data_y, data_y_err)
# plt.show()
# Initial parameters.
scale = 0.1440E+00 # 0.1
d_head = 0.1929E+02 # 20
rad_core = 0.8109E+01 # 8
rho_rel = 0.5999E-01 # 0.06
sigma = 0.1000E+01 # 1
back = 0.0 # 0
L = 0.5000E+04 # 5000
kuhn = 0.1000E+04 # 1000
eps = 0.1000E+01 # 1
D_CQ = 0.1050E+03 # 105
nu_rpa = 0.3846E+02 # 38
SC_pow = 0.6757E-03 # 0.000
exponent = 4
figure_bottom = 0.50
fig = plt.figure(figsize=(6, 8))
ax = fig.add_subplot(111)
fig.subplots_adjust(left=0.15, bottom=figure_bottom)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('q/Å')
ax.set_ylabel('I(q)')
qs = np.logspace(-2.5, -0.5)
Ints = WLM_whole_q(qs, scale, d_head, rad_core, rho_rel, sigma, back, L, kuhn, eps, D_CQ, nu_rpa, SC_pow, exponent)
[line] = ax.plot(qs, Ints, linewidth=2, color='red')
line2 = ax.errorbar(data_x, data_y, data_y_err)
axis_color = 'lightgoldenrodyellow'
first_slider_bottom = figure_bottom - 0.1
slider_bottoms = [first_slider_bottom - (i - 1) * 0.03 for i in range(1, 14, 1)]
slider_height = 0.015
slider_left = 0.3
slider_width = 0.6
box_width = 0.05
box_height = box_width
# add_axes([left, bottom, width, height])
# Slider(ax, label, valmin, valmax, valinit)
scale_slider_ax = fig.add_axes([slider_left, slider_bottoms[0], slider_width, slider_height], facecolor=axis_color)
scale_slider = Slider(scale_slider_ax, 'Scale', 0.01, 1, valinit=scale)
# scale_box_ax = fig.add_axes([slider_left-0.05, slider_bottoms[0], box_width, box_height], facecolor=axis_color)
# scale_box = CheckButtons(scale_box_ax, ['', ''], [1, 1])
# def box_changed(val):
# scale_box.lines[0].set_visible(True)
# #mpl.widgets.CheckButtons
d_head_slider_ax = fig.add_axes([slider_left, slider_bottoms[1], slider_width, slider_height], facecolor=axis_color)
d_head_slider = Slider(d_head_slider_ax, 'D_head', d_head / 10, d_head * 10, valinit=d_head)
rad_core_slider_ax = fig.add_axes([slider_left, slider_bottoms[2], slider_width, slider_height], facecolor=axis_color)
rad_core_slider = Slider(rad_core_slider_ax, 'rad_core', rad_core / 10, rad_core * 10, valinit=rad_core)
rho_rel_slider_ax = fig.add_axes([slider_left, slider_bottoms[3], slider_width, slider_height], facecolor=axis_color)
rho_rel_slider = Slider(rho_rel_slider_ax, 'rho_rel', rho_rel / 10, rho_rel * 10, valinit=rho_rel)
sigma_slider_ax = fig.add_axes([slider_left, slider_bottoms[4], slider_width, slider_height], facecolor=axis_color)
sigma_slider = Slider(sigma_slider_ax, 'sigma', sigma / 10, sigma * 10, valinit=sigma)
back_slider_ax = fig.add_axes([slider_left, slider_bottoms[5], slider_width, slider_height], facecolor=axis_color)
back_slider = Slider(back_slider_ax, 'back', 0, 0.01, valinit=back)
L_slider_ax = fig.add_axes([slider_left, slider_bottoms[6], slider_width, slider_height], facecolor=axis_color)
L_slider = Slider(L_slider_ax, 'L', L / 10, L * 10, valinit=L)
kuhn_slider_ax = fig.add_axes([slider_left, slider_bottoms[7], slider_width, slider_height], facecolor=axis_color)
kuhn_slider = Slider(kuhn_slider_ax, 'kuhn', kuhn / 10, kuhn * 10, valinit=kuhn)
eps_slider_ax = fig.add_axes([slider_left, slider_bottoms[8], slider_width, slider_height], facecolor=axis_color)
eps_slider = Slider(eps_slider_ax, 'eps', eps / 10, eps * 10, valinit=eps)
D_CQ_slider_ax = fig.add_axes([slider_left, slider_bottoms[9], slider_width, slider_height], facecolor=axis_color)
D_CQ_slider = Slider(D_CQ_slider_ax, 'D_CQ', D_CQ / 10, D_CQ * 10, valinit=D_CQ)
nu_rpa_slider_ax = fig.add_axes([slider_left, slider_bottoms[10], slider_width, slider_height], facecolor=axis_color)
nu_rpa_slider = Slider(nu_rpa_slider_ax, 'nu_rpa', nu_rpa / 10, nu_rpa * 10, valinit=nu_rpa)
SC_pow_slider_ax = fig.add_axes([slider_left, slider_bottoms[11], slider_width, slider_height], facecolor=axis_color)
SC_pow_slider = Slider(SC_pow_slider_ax, 'SC_pow', SC_pow / 10, SC_pow * 10, valinit=SC_pow)
exponent_slider_ax = fig.add_axes([slider_left, slider_bottoms[12], slider_width, slider_height], facecolor=axis_color)
exponent_slider = Slider(exponent_slider_ax, 'exponent', 0, 4, valinit=exponent)
def sliders_on_changed(val):
line.set_ydata(WLM_whole_q(qs, scale_slider.val, d_head_slider.val, rad_core_slider.val, rho_rel_slider.val,
sigma_slider.val, back_slider.val, L_slider.val, kuhn_slider.val, eps_slider.val,
D_CQ_slider.val, nu_rpa_slider.val, SC_pow_slider.val, exponent_slider.val))
fig.canvas.draw_idle()
scale_slider.on_changed(sliders_on_changed)
d_head_slider.on_changed(sliders_on_changed)
rad_core_slider.on_changed(sliders_on_changed)
rho_rel_slider.on_changed(sliders_on_changed)
sigma_slider.on_changed(sliders_on_changed)
back_slider.on_changed(sliders_on_changed)
L_slider.on_changed(sliders_on_changed)
kuhn_slider.on_changed(sliders_on_changed)
eps_slider.on_changed(sliders_on_changed)
D_CQ_slider.on_changed(sliders_on_changed)
nu_rpa_slider.on_changed(sliders_on_changed)
SC_pow_slider.on_changed(sliders_on_changed)
exponent_slider.on_changed(sliders_on_changed)
reset_button_ax = fig.add_axes([0.03, 0.4, 0.1, 0.04])
reset_button = Button(reset_button_ax, 'Reset', color=axis_color, hovercolor='0.975')
# fit_button_ax = fig.add_axes([0.03, 0.3, 0.1, 0.04])
# fit_button = Button(fit_button_ax, 'Fit', color=axis_color, hovercolor='0.975')
def reset_button_on_clicked(mouse_event):
scale_slider.reset()
d_head_slider.reset()
rad_core_slider.reset()
rho_rel_slider.reset()
sigma_slider.reset()
back_slider.reset()
L_slider.reset()
kuhn_slider.reset()
eps_slider.reset()
D_CQ_slider.reset()
nu_rpa_slider.reset()
SC_pow_slider.reset()
exponent_slider.reset()
def residual(x, y, params):
pass
def fit_button_on_clicked(mouse_event):
params = lmfit.Parameters()
params.add('scale', scale_slider.val, vary=True)
params.add('d_head', d_head_slider.val, vary=True)
params.add('rad_core', rad_core_slider.val, vary=True)
params.add('rho', rho_rel_slider.val, vary=True)
params.add('sigma', sigma_slider.val, vary=True)
params.add('back', back_slider.val, vary=True)
params.add('L', L_slider.val, vary=True)
params.add('eps', eps_slider.val, vary=True)
params.add('D_CQ', D_CQ_slider.val, vary=True)
params.add('nu_rpa', nu_rpa_slider.val, vary=True)
params.add('SC_pow', SC_pow_slider.val, vary=True)
params.add('exponent', exponent_slider.val, vary=True)
#lmfit.minimize()
reset_button.on_clicked(reset_button_on_clicked)
plt.show() | gpl-3.0 |
nhejazi/scikit-learn | examples/applications/plot_topics_extraction_with_nmf_lda.py | 39 | 4820 | """
=======================================================================================
Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
=======================================================================================
This is an example of applying :class:`sklearn.decomposition.NMF` and
:class:`sklearn.decomposition.LatentDirichletAllocation` on a corpus
of documents and extracting additive models of the topic structure of the
corpus. The output is a list of topics, each represented as a list of
terms (weights are not shown).
Non-negative Matrix Factorization is applied with two different objective
functions: the Frobenius norm, and the generalized Kullback-Leibler divergence.
The latter is equivalent to Probabilistic Latent Semantic Indexing.
The default parameters (n_samples / n_features / n_components) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_components = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
message = "Topic #%d: " % topic_idx
message += " ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]])
print(message)
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
print()
# Fit the NMF model
print("Fitting the NMF model (Frobenius norm) with tf-idf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_components, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model (Frobenius norm):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
# Fit the NMF model
print("Fitting the NMF model (generalized Kullback-Leibler divergence) with "
"tf-idf features, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_components, random_state=1,
beta_loss='kullback-leibler', solver='mu', max_iter=1000, alpha=.1,
l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model (generalized Kullback-Leibler divergence):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("Fitting LDA models with tf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_components=n_components, max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
amandalund/openmc | openmc/volume.py | 6 | 12168 | from collections import OrderedDict
from collections.abc import Iterable, Mapping
from numbers import Real, Integral
from xml.etree import ElementTree as ET
import warnings
import numpy as np
import pandas as pd
import h5py
from uncertainties import ufloat
import openmc
import openmc.checkvalue as cv
_VERSION_VOLUME = 1
class VolumeCalculation:
"""Stochastic volume calculation specifications and results.
Parameters
----------
domains : Iterable of openmc.Cell, openmc.Material, or openmc.Universe
Domains to find volumes of
samples : int
Number of samples used to generate volume estimates
lower_left : Iterable of float
Lower-left coordinates of bounding box used to sample points. If this
argument is not supplied, an attempt is made to automatically determine
a bounding box.
upper_right : Iterable of float
Upper-right coordinates of bounding box used to sample points. If this
argument is not supplied, an attempt is made to automatically determine
a bounding box.
Attributes
----------
ids : Iterable of int
IDs of domains to find volumes of
domain_type : {'cell', 'material', 'universe'}
Type of each domain
samples : int
Number of samples used to generate volume estimates
lower_left : Iterable of float
Lower-left coordinates of bounding box used to sample points
upper_right : Iterable of float
Upper-right coordinates of bounding box used to sample points
atoms : dict
Dictionary mapping unique IDs of domains to a mapping of nuclides to
total number of atoms for each nuclide present in the domain. For
example, {10: {'U235': 1.0e22, 'U238': 5.0e22, ...}}.
atoms_dataframe : pandas.DataFrame
DataFrame showing the estimated number of atoms for each nuclide present
in each domain specified.
volumes : dict
Dictionary mapping unique IDs of domains to estimated volumes in cm^3.
threshold : float
        Threshold for the maximum standard deviation of volumes.
.. versionadded:: 0.12
trigger_type : {'variance', 'std_dev', 'rel_err'}
Value type used to halt volume calculation
.. versionadded:: 0.12
iterations : int
Number of iterations over samples (for calculations with a trigger).
.. versionadded:: 0.12
"""
def __init__(self, domains, samples, lower_left=None, upper_right=None):
self._atoms = {}
self._volumes = {}
self._threshold = None
self._trigger_type = None
self._iterations = None
cv.check_type('domains', domains, Iterable,
(openmc.Cell, openmc.Material, openmc.Universe))
if isinstance(domains[0], openmc.Cell):
self._domain_type = 'cell'
elif isinstance(domains[0], openmc.Material):
self._domain_type = 'material'
elif isinstance(domains[0], openmc.Universe):
self._domain_type = 'universe'
self.ids = [d.id for d in domains]
self.samples = samples
if lower_left is not None:
if upper_right is None:
raise ValueError('Both lower-left and upper-right coordinates '
'should be specified')
# For cell domains, try to compute bounding box and make sure
# user-specified one is valid
if self.domain_type == 'cell':
for c in domains:
ll, ur = c.bounding_box
if np.any(np.isinf(ll)) or np.any(np.isinf(ur)):
continue
if (np.any(np.asarray(lower_left) > ll) or
np.any(np.asarray(upper_right) < ur)):
warnings.warn(
"Specified bounding box is smaller than computed "
"bounding box for cell {}. Volume calculation may "
"be incorrect!".format(c.id))
self.lower_left = lower_left
self.upper_right = upper_right
else:
if self.domain_type == 'cell':
ll, ur = openmc.Union(c.region for c in domains).bounding_box
if np.any(np.isinf(ll)) or np.any(np.isinf(ur)):
raise ValueError('Could not automatically determine bounding '
'box for stochastic volume calculation.')
else:
self.lower_left = ll
self.upper_right = ur
else:
raise ValueError('Could not automatically determine bounding box '
'for stochastic volume calculation.')
@property
def ids(self):
return self._ids
@property
def samples(self):
return self._samples
@property
def lower_left(self):
return self._lower_left
@property
def upper_right(self):
return self._upper_right
@property
def threshold(self):
return self._threshold
@property
def trigger_type(self):
return self._trigger_type
@property
def iterations(self):
return self._iterations
@property
def domain_type(self):
return self._domain_type
@property
def atoms(self):
return self._atoms
@property
def volumes(self):
return self._volumes
@property
def atoms_dataframe(self):
items = []
columns = [self.domain_type.capitalize(), 'Nuclide', 'Atoms']
for uid, atoms_dict in self.atoms.items():
for name, atoms in atoms_dict.items():
items.append((uid, name, atoms))
return pd.DataFrame.from_records(items, columns=columns)
@ids.setter
def ids(self, ids):
cv.check_type('domain IDs', ids, Iterable, Real)
self._ids = ids
@samples.setter
def samples(self, samples):
cv.check_type('number of samples', samples, Integral)
cv.check_greater_than('number of samples', samples, 0)
self._samples = samples
@lower_left.setter
def lower_left(self, lower_left):
        name = 'lower-left bounding box coordinates'
cv.check_type(name, lower_left, Iterable, Real)
cv.check_length(name, lower_left, 3)
self._lower_left = lower_left
@upper_right.setter
def upper_right(self, upper_right):
name = 'upper-right bounding box coordinates'
cv.check_type(name, upper_right, Iterable, Real)
cv.check_length(name, upper_right, 3)
self._upper_right = upper_right
@threshold.setter
def threshold(self, threshold):
name = 'volume std. dev. threshold'
cv.check_type(name, threshold, Real)
cv.check_greater_than(name, threshold, 0.0)
self._threshold = threshold
@trigger_type.setter
def trigger_type(self, trigger_type):
cv.check_value('tally trigger type', trigger_type,
('variance', 'std_dev', 'rel_err'))
self._trigger_type = trigger_type
@iterations.setter
def iterations(self, iterations):
name = 'volume calculation iterations'
cv.check_type(name, iterations, Integral)
cv.check_greater_than(name, iterations, 0)
self._iterations = iterations
@volumes.setter
def volumes(self, volumes):
cv.check_type('volumes', volumes, Mapping)
self._volumes = volumes
@atoms.setter
def atoms(self, atoms):
cv.check_type('atoms', atoms, Mapping)
self._atoms = atoms
def set_trigger(self, threshold, trigger_type):
"""Set a trigger on the voulme calculation
.. versionadded:: 0.12
Parameters
----------
threshold : float
            Threshold for the maximum standard deviation of volumes
trigger_type : {'variance', 'std_dev', 'rel_err'}
Value type used to halt volume calculation
"""
self.trigger_type = trigger_type
self.threshold = threshold
@classmethod
def from_hdf5(cls, filename):
"""Load stochastic volume calculation results from HDF5 file.
Parameters
----------
filename : str
Path to volume.h5 file
Returns
-------
openmc.VolumeCalculation
Results of the stochastic volume calculation
"""
with h5py.File(filename, 'r') as f:
cv.check_filetype_version(f, "volume", _VERSION_VOLUME)
domain_type = f.attrs['domain_type'].decode()
samples = f.attrs['samples']
lower_left = f.attrs['lower_left']
upper_right = f.attrs['upper_right']
threshold = f.attrs.get('threshold')
trigger_type = f.attrs.get('trigger_type')
iterations = f.attrs.get('iterations', 1)
volumes = {}
atoms = {}
ids = []
for obj_name in f:
if obj_name.startswith('domain_'):
domain_id = int(obj_name[7:])
ids.append(domain_id)
group = f[obj_name]
volume = ufloat(*group['volume'][()])
volumes[domain_id] = volume
nucnames = group['nuclides'][()]
atoms_ = group['atoms'][()]
atom_dict = OrderedDict()
for name_i, atoms_i in zip(nucnames, atoms_):
atom_dict[name_i.decode()] = ufloat(*atoms_i)
atoms[domain_id] = atom_dict
# Instantiate some throw-away domains that are used by the constructor
# to assign IDs
with warnings.catch_warnings():
warnings.simplefilter('ignore', openmc.IDWarning)
if domain_type == 'cell':
domains = [openmc.Cell(uid) for uid in ids]
elif domain_type == 'material':
domains = [openmc.Material(uid) for uid in ids]
elif domain_type == 'universe':
domains = [openmc.Universe(uid) for uid in ids]
# Instantiate the class and assign results
vol = cls(domains, samples, lower_left, upper_right)
if trigger_type is not None:
vol.set_trigger(threshold, trigger_type.decode())
vol.iterations = iterations
vol.volumes = volumes
vol.atoms = atoms
return vol
def load_results(self, filename):
"""Load stochastic volume calculation results from an HDF5 file.
Parameters
----------
filename : str
Path to volume.h5 file
"""
results = type(self).from_hdf5(filename)
# Make sure properties match
assert set(self.ids) == set(results.ids)
assert np.all(self.lower_left == results.lower_left)
assert np.all(self.upper_right == results.upper_right)
# Copy results
self.volumes = results.volumes
self.atoms = results.atoms
def to_xml_element(self):
"""Return XML representation of the volume calculation
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing volume calculation data
"""
element = ET.Element("volume_calc")
dt_elem = ET.SubElement(element, "domain_type")
dt_elem.text = self.domain_type
id_elem = ET.SubElement(element, "domain_ids")
id_elem.text = ' '.join(str(uid) for uid in self.ids)
samples_elem = ET.SubElement(element, "samples")
samples_elem.text = str(self.samples)
ll_elem = ET.SubElement(element, "lower_left")
ll_elem.text = ' '.join(str(x) for x in self.lower_left)
ur_elem = ET.SubElement(element, "upper_right")
ur_elem.text = ' '.join(str(x) for x in self.upper_right)
if self.threshold:
trigger_elem = ET.SubElement(element, "threshold")
trigger_elem.set("type", self.trigger_type)
trigger_elem.set("threshold", str(self.threshold))
return element
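# Illustrative sketch (not part of the module): a calculation is typically built
# from user-defined cells or materials, optionally given a std.-dev. trigger,
# and registered on an openmc.Settings object before export. The cell objects,
# sample count, bounding box and threshold below are assumptions for the
# example only.
def _example_volume_calculation(cells):
    vol_calc = VolumeCalculation(cells, samples=1000000,
                                 lower_left=(-10., -10., -10.),
                                 upper_right=(10., 10., 10.))
    vol_calc.set_trigger(1e-2, 'std_dev')
    settings = openmc.Settings()
    settings.volume_calculations = [vol_calc]
    settings.export_to_xml()
    # After the run, results can be reloaded with
    # VolumeCalculation.from_hdf5('volume_1.h5') (file name assumed).
    return vol_calc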
| mit |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/kernel_ridge.py | 37 | 6556 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
        Gamma parameter for the RBF, laplacian, polynomial, exponential, chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
    dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
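# Illustrative sketch (not part of scikit-learn): with kernel='precomputed' the
# estimator is fit on a user-computed Gram matrix instead of raw features.
# X_train, y_train and X_test are assumed to be user-supplied arrays; the rbf
# metric and gamma value are arbitrary example choices.
def _example_precomputed_kernel(X_train, y_train, X_test, gamma=0.1):
    # Gram matrices computed by the caller and passed through unchanged
    K_train = pairwise_kernels(X_train, metric='rbf', gamma=gamma)
    K_test = pairwise_kernels(X_test, X_train, metric='rbf', gamma=gamma)
    model = KernelRidge(alpha=1.0, kernel='precomputed').fit(K_train, y_train)
    return model.predict(K_test)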
| gpl-2.0 |
tomekkorbak/treehopper | treehopper/train.py | 1 | 1859 | import re
import numpy as np
import torch
from sklearn.model_selection import KFold
from config import set_arguments
from data.split_datasets import split_dataset_kfold, split_dataset_simple
from data.vocab import build_vocab, Vocab
from model.training import train
from data.dataset import SSTDataset
def create_full_dataset(args):
train_dir = 'training-treebank'
vocab_file = 'tmp/vocab.txt'
build_vocab([
'training-treebank/rev_sentence.txt',
'training-treebank/sklad_sentence.txt',
'test/polevaltest_sentence.txt',
        args.emb_dir + args.emb_file + '.vec'  # full vocabulary in model
    ], vocab_file)
vocab = Vocab(filename=vocab_file)
full_dataset = SSTDataset(train_dir, vocab, args.num_classes)
return vocab, full_dataset
def main(grid_args = None):
args = set_arguments(grid_args)
vocab, full_dataset = create_full_dataset(args)
if args.test:
test_dir = 'test'
test_dataset = SSTDataset(test_dir, vocab, args.num_classes)
max_dev_epoch, max_dev_acc, max_model_filename = train(full_dataset, test_dataset, vocab, args)
else:
train_dataset = SSTDataset(num_classes=args.num_classes)
dev_dataset = SSTDataset(num_classes=args.num_classes)
train_dataset, dev_dataset = split_dataset_simple(
full_dataset,
train_dataset,
dev_dataset,
split=args.split
)
max_dev_epoch, max_dev_acc, max_model_filename = train(train_dataset, dev_dataset, vocab, args)
with open(args.name + '_results', 'a') as result_file:
result_file.write(str(args) + '\nEpoch {epoch}, accuracy {acc:.4f}\n'.format(
epoch=max_dev_epoch,
acc=max_dev_acc
))
return max_dev_epoch, max_dev_acc, max_model_filename
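# Usage sketch (illustrative): called without arguments, main() reads the
# defaults from config.set_arguments, trains on the simple split (or on the
# held-out test set when args.test is set) and returns the best epoch, its
# accuracy and the saved model filename, e.g.
#
#     max_dev_epoch, max_dev_acc, model_path = main()
#
# Grid-search callers can instead pass their own arguments via grid_args; its
# exact structure is defined by config.set_arguments and is not shown here.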
if __name__ == "__main__":
main() | apache-2.0 |
massmutual/scikit-learn | sklearn/decomposition/tests/test_pca.py | 1 | 11351 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_no_empty_slice_warning():
# test if we avoid numpy warnings for computing over empty arrays
n_components = 10
    n_features = n_components + 2  # anything > n_comps triggered it in 0.16
X = np.random.uniform(-1, 1, size=(n_components, n_features))
pca = PCA(n_components=n_components)
assert_no_warnings(pca.fit, X)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
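# Illustrative helper (not an upstream test): the float form of n_components
# corresponds to thresholding the cumulative explained variance ratio of a
# full fit; the 0.95 default below is an arbitrary example value.
def _n_components_for_variance(X, fraction=0.95):
    ratios = np.cumsum(PCA().fit(X).explained_variance_ratio_)
    return int(np.searchsorted(ratios, fraction) + 1)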
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
XENON1T/processing | montecarlo/fax_waveform/TruthSorting.py | 1 | 5121 | ###########################
## Code for sorting the truth root file(peak-by-peak) format into event-by-event format
## Output is a pickle file
## by Qing Lin
#########
## @ 2017-01-09
## Please NOTE the code can only be used with single S1&S2 simulation
## Double/Multiple peak simulation is not available in merging/minitree yet
###########################
import pickle
import pandas as pd
import ROOT
from ROOT import TFile
from ROOT import TTree
import root_pandas
import sys
if len(sys.argv)<2:
print("============= Syntax =============")
print("python TruthSorting.py <truth file.csv (abs.)> <output file (no ext)> <output format; 0=pickle (default), 1=ROOT, 2=both>")
exit()
TruthFile = sys.argv[1]
OutputFile = sys.argv[2]
if '.root' in OutputFile:
OutputFile = OutputFile.split('.root')[0]
else:
OutputFile = OutputFile.split('.pkl')[0]
OutputFormat=0
if len(sys.argv)>3:
OutputFormat = float(sys.argv[3])
print ("Input file: ", TruthFile)
#################
## load the root files
## and TTrees
#################
###################
## need to sort and add the truth peak values into Data as well
## In truth file we want to keep both first and second largest peak
## both in time mean, sigma and area
####################
Data = {}
# load the truth data from csv
truth_data = pd.read_csv(TruthFile)
NumStepsInTruth = len(truth_data.index)
# initialize Data for truth
Data['index_truth'] = []
Data['s1_time_truth'] = []
Data['s1_time_std_truth'] = []
Data['s1_area_truth'] = []
Data['s1_area_top_fraction_truth'] = []
Data['s2_time_truth'] = []
Data['s2_electron_time_truth'] = []
Data['s2_first_electron_time_truth'] = []
Data['s2_time_std_truth'] = []
Data['s2_area_truth'] = []
Data['s2_area_top_fraction_truth'] = []
Data['x_truth'] = []
Data['y_truth'] = []
iteration_id = 0
for event_id in range(10000000):
if iteration_id>=NumStepsInTruth:
break
if (event_id+1)%100==0:
print("==== processed_file: "+str(event_id+1)+" events finished loading")
s1_time_truth = -1
s1_time_std_truth = -1
s1_area_truth = -1
s1_area_top_fraction_truth = -1
s2_electron_time_truth = -1
s2_first_electron_time_truth = -1
s2_time_truth = -1
s2_time_std_truth = -1
s2_area_truth = -1
s2_area_top_fraction_truth = -1
x_truth = -1e10
y_truth = -1e10
ifcounteds1 = 0
while truth_data['event'][iteration_id]==event_id:
tag = 2 # 0 for s1, 1 for s2, 2 for photoionization
if truth_data['peak_type'][iteration_id] == 's1':
tag = 0
if truth_data['peak_type'][iteration_id] == 's2':
tag = 1
if tag==0:
#print("Iterator: "+str(iteration_id)+" -> S1")
s1_time_truth = truth_data['t_mean_photons'][iteration_id]
s1_time_std_truth = truth_data['t_sigma_photons'][iteration_id]
s1_area_truth = truth_data['n_photons'][iteration_id]
s1_area_top_fraction_truth = truth_data['top_fraction'][iteration_id]
elif tag==1:
#print("Iterator: "+str(iteration_id)+" -> S2")
s2_electron_time_truth = truth_data['t_mean_electrons'][iteration_id]
s2_first_electron_time_truth = truth_data['t_first_electron'][iteration_id]
s2_time_truth = truth_data['t_mean_photons'][iteration_id]
s2_time_std_truth = truth_data['t_sigma_photons'][iteration_id]
s2_area_truth = truth_data['n_photons'][iteration_id]
s2_area_top_fraction_truth = truth_data['top_fraction'][iteration_id]
x_truth = truth_data['x'][iteration_id]
y_truth = truth_data['y'][iteration_id]
iteration_id += 1
if iteration_id>=NumStepsInTruth:
break
Data['index_truth'].append(event_id)
Data['s1_time_truth'].append(s1_time_truth)
Data['s1_time_std_truth'].append(s1_time_std_truth)
Data['s1_area_truth'].append(s1_area_truth)
Data['s2_electron_time_truth'].append(s2_electron_time_truth)
Data['s2_first_electron_time_truth'].append(s2_first_electron_time_truth)
Data['s2_time_truth'].append(s2_time_truth)
Data['s2_time_std_truth'].append(s2_time_std_truth)
Data['s2_area_truth'].append(s2_area_truth)
Data['s1_area_top_fraction_truth'].append(s1_area_top_fraction_truth)
Data['s2_area_top_fraction_truth'].append(s2_area_top_fraction_truth)
Data['x_truth'].append(x_truth)
Data['y_truth'].append(y_truth)
print ("Number of events: ", event_id)
######################
## Convert to data format in pandas
######################
PandasData = {}
for item in Data:
PandasData[item] = pd.Series(Data[item])
df = pd.DataFrame(PandasData)
#######################
## Save to ROOT
#######################
if OutputFormat == 1 or OutputFormat == 2:
df.to_root(OutputFile+".root", 'fax_truth_sort')
print ("Written to: ", OutputFile+".root")
#######################
## Save to pickle
#######################
if OutputFormat == 0 or OutputFormat == 2:
pickle.dump(df, open(OutputFile+".pkl", 'wb'))
print ("Written to: ", OutputFile+".pkl")
| apache-2.0 |
DEAP/deap | examples/ga/nsga2.py | 11 | 5224 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import array
import random
import json
import numpy
from math import sqrt
from deap import algorithms
from deap import base
from deap import benchmarks
from deap.benchmarks.tools import diversity, convergence, hypervolume
from deap import creator
from deap import tools
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMin)
toolbox = base.Toolbox()
# Problem definition
# Functions zdt1, zdt2, zdt3, zdt6 have bounds [0, 1]
BOUND_LOW, BOUND_UP = 0.0, 1.0
# Functions zdt4 has bounds x1 = [0, 1], xn = [-5, 5], with n = 2, ..., 10
# BOUND_LOW, BOUND_UP = [0.0] + [-5.0]*9, [1.0] + [5.0]*9
# Functions zdt1, zdt2, zdt3 have 30 dimensions, zdt4 and zdt6 have 10
NDIM = 30
def uniform(low, up, size=None):
try:
return [random.uniform(a, b) for a, b in zip(low, up)]
except TypeError:
return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
toolbox.register("attr_float", uniform, BOUND_LOW, BOUND_UP, NDIM)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", benchmarks.zdt1)
toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0)
toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0/NDIM)
toolbox.register("select", tools.selNSGA2)
def main(seed=None):
random.seed(seed)
NGEN = 250
MU = 100
CXPB = 0.9
stats = tools.Statistics(lambda ind: ind.fitness.values)
# stats.register("avg", numpy.mean, axis=0)
# stats.register("std", numpy.std, axis=0)
stats.register("min", numpy.min, axis=0)
stats.register("max", numpy.max, axis=0)
logbook = tools.Logbook()
logbook.header = "gen", "evals", "std", "min", "avg", "max"
pop = toolbox.population(n=MU)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in pop if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# This is just to assign the crowding distance to the individuals
# no actual selection is done
pop = toolbox.select(pop, len(pop))
record = stats.compile(pop)
logbook.record(gen=0, evals=len(invalid_ind), **record)
print(logbook.stream)
# Begin the generational process
for gen in range(1, NGEN):
# Vary the population
offspring = tools.selTournamentDCD(pop, len(pop))
offspring = [toolbox.clone(ind) for ind in offspring]
for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
if random.random() <= CXPB:
toolbox.mate(ind1, ind2)
toolbox.mutate(ind1)
toolbox.mutate(ind2)
del ind1.fitness.values, ind2.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Select the next generation population
pop = toolbox.select(pop + offspring, MU)
record = stats.compile(pop)
logbook.record(gen=gen, evals=len(invalid_ind), **record)
print(logbook.stream)
print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0]))
return pop, logbook
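# Usage sketch (illustrative): main() accepts an optional seed, so a
# reproducible run followed by extraction of the first non-dominated front
# could look like
#
#     pop, logbook = main(seed=64)
#     first_front = tools.sortNondominated(pop, len(pop), first_front_only=True)[0]
#
# sortNondominated is part of deap.tools; the seed value is arbitrary.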
if __name__ == "__main__":
# with open("pareto_front/zdt1_front.json") as optimal_front_data:
# optimal_front = json.load(optimal_front_data)
# Use 500 of the 1000 points in the json file
# optimal_front = sorted(optimal_front[i] for i in range(0, len(optimal_front), 2))
pop, stats = main()
# pop.sort(key=lambda x: x.fitness.values)
# print(stats)
# print("Convergence: ", convergence(pop, optimal_front))
# print("Diversity: ", diversity(pop, optimal_front[0], optimal_front[-1]))
# import matplotlib.pyplot as plt
# import numpy
# front = numpy.array([ind.fitness.values for ind in pop])
# optimal_front = numpy.array(optimal_front)
# plt.scatter(optimal_front[:,0], optimal_front[:,1], c="r")
# plt.scatter(front[:,0], front[:,1], c="b")
# plt.axis("tight")
# plt.show()
| lgpl-3.0 |
shahankhatch/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
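# Illustrative sketch (not part of scikit-learn): a typical workflow fits on a
# feature matrix X and integer labels y, then predicts user-supplied new
# samples X_new; the hyperparameter values are arbitrary examples.
def _example_linear_svc(X, y, X_new):
    clf = LinearSVC(C=1.0, loss='squared_hinge', dual=True).fit(X, y)
    return clf.predict(X_new)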
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard SVR
        loss while 'squared_epsilon_insensitive' is the squared
        epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
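# Illustrative sketch (not part of scikit-learn): LinearSVR follows the same
# fit/predict pattern, with epsilon setting the width of the insensitive tube
# on the scale of y; the values below are arbitrary examples.
def _example_linear_svr(X, y, X_new):
    reg = LinearSVR(C=1.0, epsilon=0.1).fit(X, y)
    return reg.predict(X_new)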
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
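    Examples
    --------
    A minimal usage sketch; the data values below are arbitrary and only
    illustrate the call pattern:
    >>> import numpy as np
    >>> from sklearn.svm import OneClassSVM
    >>> X = np.array([[0., 0.], [0.1, -0.1], [-0.1, 0.2], [0.2, 0.1]])
    >>> clf = OneClassSVM(nu=0.5, kernel='rbf', gamma='auto').fit(X)
    >>> clf.predict([[0., 0.1]]).shape
    (1,)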
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
jseabold/statsmodels | statsmodels/gam/tests/test_gam.py | 5 | 26227 | # pylint: disable=F841
"""
unit test for GAM
Author: Luca Puggini
Created on 08/07/2015
"""
import os
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
from scipy.linalg import block_diag
import pytest
from statsmodels.tools.linalg import matrix_sqrt
from statsmodels.gam.smooth_basis import (
UnivariatePolynomialSmoother, PolynomialSmoother, BSplines,
GenericSmoothers, UnivariateCubicSplines, CyclicCubicSplines)
from statsmodels.gam.generalized_additive_model import (
GLMGam, LogitGam, make_augmented_matrix, penalized_wls)
from statsmodels.gam.gam_cross_validation.gam_cross_validation import (
MultivariateGAMCV, MultivariateGAMCVPath, _split_train_test_smoothers)
from statsmodels.gam.gam_penalties import (UnivariateGamPenalty,
MultivariateGamPenalty)
from statsmodels.gam.gam_cross_validation.cross_validators import KFold
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families.family import Gaussian
from statsmodels.genmod.generalized_linear_model import lm
sigmoid = np.vectorize(lambda x: 1.0 / (1.0 + np.exp(-x)))
def polynomial_sample_data():
"""A polynomial of degree 4
poly = ax^4 + bx^3 + cx^2 + dx + e
second der = 12ax^2 + 6bx + 2c
integral from -1 to 1 of second der^2 is
(288 a^2)/5 + 32 a c + 8 (3 b^2 + c^2)
    the gradient of the integral with respect to (a, b, c, d, e) is
    [576*a/5 + 32*c, 48*b, 32*a + 16*c, 0, 0]
Returns
-------
poly : smoother instance
y : ndarray
generated function values, demeaned
"""
n = 10000
x = np.linspace(-1, 1, n)
y = 2 * x ** 3 - x
y -= y.mean()
degree = [4]
pol = PolynomialSmoother(x, degree)
return pol, y
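# Illustrative check (assuming sympy is available): the closed-form expression in
# the docstring above can be re-derived symbolically, e.g.
#     import sympy as sp
#     a, b, c, d, e, x = sp.symbols('a b c d e x')
#     p = a*x**4 + b*x**3 + c*x**2 + d*x + e
#     sp.expand(sp.integrate(sp.diff(p, x, 2)**2, (x, -1, 1)))
# which gives 288*a**2/5 + 32*a*c + 24*b**2 + 8*c**2; ``integral`` below returns
# half of this quantity.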
def integral(params):
d, c, b, a = params
itg = (288 * a ** 2) / 5 + (32 * a * c) + 8 * (3 * b ** 2 + c ** 2)
itg /= 2
return itg
def grad(params):
d, c, b, a = params
grd = np.array([576 * a / 5 + 32 * c, 48 * b, 32 * a + 16 * c, 0])
grd = grd[::-1]
return grd / 2
def hessian(params):
hess = np.array([[576 / 5, 0, 32, 0],
[0, 48, 0, 0],
[32, 0, 16, 0],
[0, 0, 0, 0]
])
return hess / 2
def cost_function(params, pol, y, alpha):
# this should be the MSE or log likelihood value
lin_pred = np.dot(pol.basis, params)
gaussian = Gaussian()
expval = gaussian.link.inverse(lin_pred)
loglike = gaussian.loglike(y, expval)
    # this is the value of the GAM penalty for the example polynomial
itg = integral(params)
# return the cost function of the GAM for the given polynomial
return loglike - alpha * itg, loglike, itg
def test_gam_penalty():
"""
test the func method of the gam penalty
:return:
"""
pol, y = polynomial_sample_data()
univ_pol = pol.smoothers[0]
alpha = 1
gp = UnivariateGamPenalty(alpha=alpha, univariate_smoother=univ_pol)
for _ in range(10):
params = np.random.randint(-2, 2, 4)
gp_score = gp.func(params)
itg = integral(params)
assert_allclose(gp_score, itg, atol=1.e-1)
def test_gam_gradient():
# test the gam gradient for the example polynomial
np.random.seed(1)
pol, y = polynomial_sample_data()
alpha = 1
smoother = pol.smoothers[0]
gp = UnivariateGamPenalty(alpha=alpha, univariate_smoother=smoother)
for _ in range(10):
params = np.random.uniform(-2, 2, 4)
params = np.array([1, 1, 1, 1])
gam_grad = gp.deriv(params)
grd = grad(params)
assert_allclose(gam_grad, grd, rtol=1.e-2, atol=1.e-2)
def test_gam_hessian():
# test the deriv2 method of the gam penalty
np.random.seed(1)
pol, y = polynomial_sample_data()
univ_pol = pol.smoothers[0]
alpha = 1
gp = UnivariateGamPenalty(alpha=alpha, univariate_smoother=univ_pol)
for _ in range(10):
params = np.random.randint(-2, 2, 5)
gam_der2 = gp.deriv2(params)
hess = hessian(params)
hess = np.flipud(hess)
hess = np.fliplr(hess)
assert_allclose(gam_der2, hess, atol=1.e-13, rtol=1.e-3)
def test_approximation():
np.random.seed(1)
poly, y = polynomial_sample_data()
alpha = 1
for _ in range(10):
params = np.random.uniform(-1, 1, 4)
cost, err, itg = cost_function(params, poly, y, alpha)
glm_gam = GLMGam(y, smoother=poly, alpha=alpha)
# TODO: why do we need pen_weight=1
gam_loglike = glm_gam.loglike(params, scale=1, pen_weight=1)
assert_allclose(err - itg, cost, rtol=1e-10)
assert_allclose(gam_loglike, cost, rtol=0.1)
def test_gam_glm():
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.y.values
df = [10]
degree = [3]
bsplines = BSplines(x, degree=degree, df=df, include_intercept=True)
# y_mgcv is obtained from R with the following code
# g = gam(y~s(x, k = 10, bs = "cr"), data = data, scale = 80)
y_mgcv = np.asarray(data_from_r.y_est)
alpha = 0.1 # chosen by trial and error
glm_gam = GLMGam(y, smoother=bsplines, alpha=alpha)
res_glm_gam = glm_gam.fit(method='bfgs', max_start_irls=0,
disp=1, maxiter=10000, maxfun=5000)
y_gam0 = np.dot(bsplines.basis, res_glm_gam.params)
y_gam = np.asarray(res_glm_gam.fittedvalues)
assert_allclose(y_gam, y_gam0, rtol=1e-10)
# plt.plot(x, y_gam, '.', label='gam')
# plt.plot(x, y_mgcv, '.', label='mgcv')
# plt.plot(x, y, '.', label='y')
# plt.legend()
# plt.show()
assert_allclose(y_gam, y_mgcv, atol=1.e-2)
def test_gam_discrete():
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.ybin.values
df = [10]
degree = [5]
bsplines = BSplines(x, degree=degree, df=df, include_intercept=True)
# y_mgcv is obtained from R with the following code
# g = gam(y~s(x, k = 10, bs = "cr"), data = data, scale = 80)
y_mgcv = data_from_r.ybin_est
alpha = 0.00002
# gp = UnivariateGamPenalty(alpha=alpha, univariate_smoother=bsplines)
# lg_gam = LogitGam(y, bsplines.basis, penal=gp)
#
lg_gam = LogitGam(y, bsplines, alpha=alpha)
res_lg_gam = lg_gam.fit(maxiter=10000)
y_gam = np.dot(bsplines.basis, res_lg_gam.params)
y_gam = sigmoid(y_gam)
y_mgcv = sigmoid(y_mgcv)
# plt.plot(x, y_gam, label='gam')
# plt.plot(x, y_mgcv, label='mgcv')
# plt.plot(x, y, '.', label='y')
# plt.ylim(-0.4, 1.4)
# plt.legend()
# plt.show()
assert_allclose(y_gam, y_mgcv, rtol=1.e-10, atol=1.e-1)
def multivariate_sample_data(seed=1):
n = 1000
x1 = np.linspace(-1, 1, n)
x2 = np.linspace(-10, 10, n)
x = np.vstack([x1, x2]).T
np.random.seed(seed)
y = x1 * x1 * x1 + x2 + np.random.normal(0, 0.01, n)
degree1 = 4
degree2 = 3
degrees = [degree1, degree2]
pol = PolynomialSmoother(x, degrees)
return x, y, pol
def test_multivariate_penalty():
alphas = [1, 2]
weights = [1, 1]
np.random.seed(1)
x, y, pol = multivariate_sample_data()
univ_pol1 = UnivariatePolynomialSmoother(x[:, 0], degree=pol.degrees[0])
univ_pol2 = UnivariatePolynomialSmoother(x[:, 1], degree=pol.degrees[1])
gp1 = UnivariateGamPenalty(alpha=alphas[0], univariate_smoother=univ_pol1)
gp2 = UnivariateGamPenalty(alpha=alphas[1], univariate_smoother=univ_pol2)
with pytest.warns(UserWarning, match="weights is currently ignored"):
mgp = MultivariateGamPenalty(multivariate_smoother=pol, alpha=alphas,
weights=weights)
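    # The multivariate penalty should decompose additively over the smooth
    # components: penalty values add, gradients concatenate, and hessians
    # combine block-diagonally, which is what the assertions below check.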
for i in range(10):
params1 = np.random.randint(-3, 3, pol.smoothers[0].dim_basis)
params2 = np.random.randint(-3, 3, pol.smoothers[1].dim_basis)
params = np.concatenate([params1, params2])
c1 = gp1.func(params1)
c2 = gp2.func(params2)
c = mgp.func(params)
assert_allclose(c, c1 + c2, atol=1.e-10, rtol=1.e-10)
d1 = gp1.deriv(params1)
d2 = gp2.deriv(params2)
d12 = np.concatenate([d1, d2])
d = mgp.deriv(params)
assert_allclose(d, d12)
h1 = gp1.deriv2(params1)
h2 = gp2.deriv2(params2)
h12 = block_diag(h1, h2)
h = mgp.deriv2(params)
assert_allclose(h, h12)
def test_generic_smoother():
x, y, poly = multivariate_sample_data()
alphas = [0.4, 0.7]
weights = [1, 1] # noqa: F841
gs = GenericSmoothers(poly.x, poly.smoothers)
gam_gs = GLMGam(y, smoother=gs, alpha=alphas)
gam_gs_res = gam_gs.fit()
gam_poly = GLMGam(y, smoother=poly, alpha=alphas)
gam_poly_res = gam_poly.fit()
assert_allclose(gam_gs_res.params, gam_poly_res.params)
def test_multivariate_gam_1d_data():
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.y
df = [10]
degree = [3]
bsplines = BSplines(x, degree=degree, df=df)
# y_mgcv is obtained from R with the following code
# g = gam(y~s(x, k = 10, bs = "cr"), data = data, scale = 80)
y_mgcv = data_from_r.y_est
    # alpha is chosen by manual adjustment to reduce the discrepancy in fittedvalues
alpha = [0.0168 * 0.0251 / 2 * 500]
gp = MultivariateGamPenalty(bsplines, alpha=alpha) # noqa: F841
glm_gam = GLMGam(y, exog=np.ones((len(y), 1)), smoother=bsplines,
alpha=alpha)
# "nm" converges to a different params, "bfgs" params are close to pirls
# res_glm_gam = glm_gam.fit(method='nm', max_start_irls=0,
# disp=1, maxiter=10000, maxfun=5000)
res_glm_gam = glm_gam.fit(method='pirls', max_start_irls=0,
disp=1, maxiter=10000)
y_gam = res_glm_gam.fittedvalues
# plt.plot(x, y_gam, '.', label='gam')
# plt.plot(x, y_mgcv, '.', label='mgcv')
# plt.plot(x, y, '.', label='y')
# plt.legend()
# plt.show()
assert_allclose(y_gam, y_mgcv, atol=0.01)
def test_multivariate_gam_cv():
# SMOKE test
# no test is performed. It only checks that there is not any runtime error
def cost(x1, x2):
return np.linalg.norm(x1 - x2) / len(x1)
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.y.values
df = [10]
degree = [5]
bsplines = BSplines(x, degree=degree, df=df)
# y_mgcv is obtained from R with the following code
# g = gam(y~s(x, k = 10, bs = "cr"), data = data, scale = 80)
alphas = [0.0251]
alphas = [2]
cv = KFold(3)
gp = MultivariateGamPenalty(bsplines, alpha=alphas) # noqa: F841
gam_cv = MultivariateGAMCV(smoother=bsplines, alphas=alphas, gam=GLMGam,
cost=cost, endog=y, exog=None, cv_iterator=cv)
gam_cv_res = gam_cv.fit() # noqa: F841
def test_multivariate_gam_cv_path():
def sample_metric(y1, y2):
return np.linalg.norm(y1 - y2) / len(y1)
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.y.values
se_from_mgcv = data_from_r.y_est_se # noqa: F841
y_mgcv = data_from_r.y_mgcv_gcv # noqa: F841
df = [10]
degree = [6]
bsplines = BSplines(x, degree=degree, df=df, include_intercept=True)
gam = GLMGam
alphas = [np.linspace(0, 2, 10)]
k = 3
cv = KFold(k_folds=k, shuffle=True)
# Note: kfold cv uses random shuffle
np.random.seed(123)
gam_cv = MultivariateGAMCVPath(smoother=bsplines, alphas=alphas, gam=gam,
cost=sample_metric, endog=y, exog=None,
cv_iterator=cv)
gam_cv_res = gam_cv.fit() # noqa: F841
glm_gam = GLMGam(y, smoother=bsplines, alpha=gam_cv.alpha_cv)
res_glm_gam = glm_gam.fit(method='irls', max_start_irls=0,
disp=1, maxiter=10000)
y_est = res_glm_gam.predict(bsplines.basis)
# plt.plot(x, y, '.', label='y')
# plt.plot(x, y_est, '.', label='y est')
# plt.plot(x, y_mgcv, '.', label='y mgcv')
# plt.legend()
# plt.show()
    # The test compares to the result obtained with GCV and not k-fold CV.
    # This is because MGCV does not support k-fold CV
assert_allclose(data_from_r.y_mgcv_gcv, y_est, atol=1.e-1, rtol=1.e-1)
# Note: kfold cv uses random shuffle
np.random.seed(123)
alpha_cv, res_cv = glm_gam.select_penweight_kfold(alphas=alphas, k_folds=3)
assert_allclose(alpha_cv, gam_cv.alpha_cv, rtol=1e-12)
def test_train_test_smoothers():
n = 6
x = np.zeros(shape=(n, 2))
x[:, 0] = range(6)
x[:, 1] = range(6, 12)
poly = PolynomialSmoother(x, degrees=[3, 3])
train_index = list(range(3))
test_index = list(range(3, 6))
train_smoother, test_smoother = _split_train_test_smoothers(poly.x, poly,
train_index,
test_index)
expected_train_basis = [[0., 0., 0., 6., 36., 216.],
[1., 1., 1., 7., 49., 343.],
[2., 4., 8., 8., 64., 512.]]
assert_allclose(train_smoother.basis, expected_train_basis)
expected_test_basis = [[3., 9., 27., 9., 81., 729.],
[4., 16., 64., 10., 100., 1000.],
[5., 25., 125., 11., 121., 1331.]]
assert_allclose(test_smoother.basis, expected_test_basis)
def test_get_sqrt():
n = 1000
np.random.seed(1)
x = np.random.normal(0, 1, (n, 3))
x2 = np.dot(x.T, x)
sqrt_x2 = matrix_sqrt(x2)
x2_reconstruction = np.dot(sqrt_x2.T, sqrt_x2)
assert_allclose(x2_reconstruction, x2)
def test_make_augmented_matrix():
np.random.seed(1)
n = 500
x = np.random.uniform(-1, 1, (n, 3))
s = np.dot(x.T, x)
y = np.array(list(range(n)))
w = np.random.uniform(0, 1, n)
nobs, n_columns = x.shape
# matrix_sqrt removes redundant rows,
# if alpha is zero, then no augmentation is needed
alpha = 0
aug_y, aug_x, aug_w = make_augmented_matrix(y, x, alpha * s, w)
expected_aug_x = x
assert_allclose(aug_x, expected_aug_x)
expected_aug_y = y
expected_aug_y[:nobs] = y
assert_allclose(aug_y, expected_aug_y)
expected_aug_w = w
assert_allclose(aug_w, expected_aug_w)
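    # With a nonzero penalty, penalized WLS is solved as ordinary WLS on augmented
    # data: the matrix square root of alpha * s is stacked under x, zeros are
    # appended to y, and unit weights are appended to w (checked below).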
alpha = 1
aug_y, aug_x, aug_w = make_augmented_matrix(y, x, s, w)
rs = matrix_sqrt(alpha * s)
    # an alternative version of matrix_sqrt using cholesky is not available
# rs = sp.linalg.cholesky(alpha * s)
assert_allclose(np.dot(rs.T, rs), alpha * s)
expected_aug_x = np.vstack([x, rs])
assert_allclose(aug_x, expected_aug_x)
expected_aug_y = np.zeros(shape=(nobs + n_columns,))
expected_aug_y[:nobs] = y
assert_allclose(aug_y, expected_aug_y)
expected_aug_w = np.concatenate((w, [1] * n_columns), axis=0)
assert_allclose(aug_w, expected_aug_w)
def test_penalized_wls():
np.random.seed(1)
n = 20
p = 3
x = np.random.normal(0, 1, (n, 3))
y = x[:, 1] - x[:, 2] + np.random.normal(0, .1, n)
y -= y.mean()
weights = np.ones(shape=(n,))
s = np.random.normal(0, 1, (p, p))
pen_wls_res = penalized_wls(y, x, 0 * s, weights)
ls_res = lm.OLS(y, x).fit()
assert_allclose(ls_res.params, pen_wls_res.params)
def test_cyclic_cubic_splines():
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results",
"cubic_cyclic_splines_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
x = data_from_r[['x0', 'x2']].values
y = data_from_r['y'].values
y_est_mgcv = data_from_r[['y_est']].values # noqa: F841
s_mgcv = data_from_r[['s(x0)', 's(x2)']].values
dfs = [10, 10]
ccs = CyclicCubicSplines(x, df=dfs)
alpha = [0.05 / 2, 0.0005 / 2]
# TODO: if alpha changes in pirls this should be updated
gam = GLMGam(y, smoother=ccs, alpha=alpha)
gam_res = gam.fit(method='pirls')
s0 = np.dot(ccs.basis[:, ccs.mask[0]],
gam_res.params[ccs.mask[0]])
# TODO: Mean has to be removed
# removing mean could be replaced by options for intercept handling
s0 -= s0.mean()
s1 = np.dot(ccs.basis[:, ccs.mask[1]],
gam_res.params[ccs.mask[1]])
s1 -= s1.mean() # TODO: Mean has to be removed
# plt.subplot(2, 1, 1)
# plt.plot(x[:, 0], s0, '.', label='s0')
# plt.plot(x[:, 0], s_mgcv[:, 0], '.', label='s0_mgcv')
# plt.legend(loc='best')
#
# plt.subplot(2, 1, 2)
# plt.plot(x[:, 1], s1, '.', label='s1_est')
# plt.plot(x[:, 1], s_mgcv[:, 1], '.', label='s1_mgcv')
# plt.legend(loc='best')
# plt.show()
assert_allclose(s0, s_mgcv[:, 0], atol=0.02)
assert_allclose(s1, s_mgcv[:, 1], atol=0.33)
def test_multivariate_cubic_splines():
np.random.seed(0)
from statsmodels.gam.smooth_basis import CubicSplines
n = 500
x1 = np.linspace(-3, 3, n)
x2 = np.linspace(0, 1, n)**2
x = np.vstack([x1, x2]).T
y1 = np.sin(x1) / x1
y2 = x2 * x2
y0 = y1 + y2
# need small enough noise variance to get good estimate for this test
y = y0 + np.random.normal(0, .3 / 2, n)
y -= y.mean()
y0 -= y0.mean()
alphas = [1e-3, 1e-3]
cs = CubicSplines(x, df=[10, 10], constraints='center')
gam = GLMGam(y, exog=np.ones((n, 1)), smoother=cs, alpha=alphas)
gam_res = gam.fit(method='pirls')
y_est = gam_res.fittedvalues
y_est -= y_est.mean()
# cut the tails
index = list(range(50, n - 50))
y_est = y_est[index]
y0 = y0[index]
y = y[index]
# plt.plot(y_est, label='y est')
# plt.plot(y0, label='y0')
# plt.plot(y, '.', label='y')
# plt.legend(loc='best')
# plt.show()
assert_allclose(y_est, y0, atol=0.04)
def test_glm_pirls_compatibility():
np.random.seed(0)
n = 500
x1 = np.linspace(-3, 3, n)
x2 = np.random.rand(n)
x = np.vstack([x1, x2]).T
y1 = np.sin(x1) / x1
y2 = x2 * x2
y0 = y1 + y2
y = y0 + np.random.normal(0, .3, n)
y -= y.mean()
y0 -= y0.mean()
# TODO: we have now alphas == alphas_glm
alphas = [5.75] * 2
alphas_glm = [1.2] * 2 # noqa: F841
# using constraints avoids singular exog.
cs = BSplines(x, df=[10, 10], degree=[3, 3], constraints='center')
gam_pirls = GLMGam(y, smoother=cs, alpha=alphas)
gam_glm = GLMGam(y, smoother=cs, alpha=alphas)
gam_res_glm = gam_glm.fit(method='nm', max_start_irls=0,
disp=1, maxiter=20000, maxfun=10000)
gam_res_glm = gam_glm.fit(start_params=gam_res_glm.params,
method='bfgs', max_start_irls=0,
disp=1, maxiter=20000, maxfun=10000)
gam_res_pirls = gam_pirls.fit()
y_est_glm = np.dot(cs.basis, gam_res_glm.params)
y_est_glm -= y_est_glm.mean()
y_est_pirls = np.dot(cs.basis, gam_res_pirls.params)
y_est_pirls -= y_est_pirls.mean()
# plt.plot(y_est_pirls)
# plt.plot(y_est_glm)
# plt.plot(y, '.')
# plt.show()
assert_allclose(gam_res_glm.params, gam_res_pirls.params, atol=5e-5,
rtol=5e-5)
assert_allclose(y_est_glm, y_est_pirls, atol=5e-5)
def test_zero_penalty():
x, y, poly = multivariate_sample_data()
alphas = [0, 0]
gam_gs = GLMGam(y, smoother=poly, alpha=alphas)
gam_gs_res = gam_gs.fit()
y_est_gam = gam_gs_res.predict()
glm = GLM(y, poly.basis).fit()
y_est = glm.predict()
assert_allclose(y_est, y_est_gam)
def test_spl_s():
# matrix from R
spl_s_R = [[0, 0, 0.000000000, 0.000000000, 0.000000000, 0.000000000],
[0, 0, 0.000000000, 0.000000000, 0.000000000, 0.000000000],
[0, 0, 0.001400000, 0.000200000, -0.001133333, -0.001000000],
[0, 0, 0.000200000, 0.002733333, 0.001666667, -0.001133333],
[0, 0, -0.001133333, 0.001666667, 0.002733333, 0.000200000],
[0, 0, -0.001000000, -0.001133333, 0.000200000, 0.001400000]]
np.random.seed(1)
x = np.random.normal(0, 1, 10)
xk = np.array([0.2, .4, .6, .8])
cs = UnivariateCubicSplines(x, df=4)
cs.knots = xk
spl_s = cs._splines_s()
assert_allclose(spl_s_R, spl_s, atol=4.e-10)
def test_partial_values2():
np.random.seed(0)
n = 1000
x = np.random.uniform(0, 1, (n, 2))
x = x - x.mean()
y = x[:, 0] * x[:, 0] + np.random.normal(0, .01, n)
y -= y.mean()
alpha = 0.0
# BUG: mask is incorrect if exog is not None, start_idx missing
# bsplines = BSplines(x, degree=[3] * 2, df=[10] * 2)
# glm_gam = GLMGam(y, exog=np.ones((len(y), 1)), smoother=bsplines,
# alpha=alpha)
bsplines = BSplines(x, degree=[3] * 2, df=[10] * 2,
include_intercept=[True, False])
glm_gam = GLMGam(y, smoother=bsplines, alpha=alpha)
res_glm_gam = glm_gam.fit(method='pirls', max_start_irls=0,
disp=0, maxiter=5000)
glm = GLM(y, bsplines.basis) # noqa: F841
# case with constant column in exog is currently wrong
# ex = np.column_stack((np.zeros((len(y), 1)), bsplines.smoothers[0].basis,
# np.zeros_like(bsplines.smoothers[1].basis) ))
ex = np.column_stack((bsplines.smoothers[0].basis,
np.zeros_like(bsplines.smoothers[1].basis)))
y_est = res_glm_gam.predict(ex, transform=False)
y_partial_est, se = res_glm_gam.partial_values(0)
assert_allclose(y_est, y_partial_est, atol=0.05)
assert se.min() < 100
# TODO: sometimes the SE reported by partial_values is very large.
# This should be double checked
def test_partial_values():
# this test is only approximate because we do not use the same spline
# basis functions (knots) as mgcv
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.y.values
se_from_mgcv = data_from_r.y_est_se
df = [10]
degree = [6]
bsplines = BSplines(x, degree=degree, df=df, include_intercept=True)
# TODO: alpha found by trial and error to pass assert
alpha = 0.025 / 115 * 500
glm_gam = GLMGam(y, smoother=bsplines, alpha=alpha)
res_glm_gam = glm_gam.fit(maxiter=10000, method='bfgs')
    # TODO: if IRLS is used, res_glm_gam does not have partial_values.
univ_bsplines = bsplines.smoothers[0] # noqa: F841
hat_y, se = res_glm_gam.partial_values(0)
assert_allclose(hat_y, data_from_r["y_est"], rtol=0, atol=0.008)
# TODO: bug missing scale
bug_fact = np.sqrt(res_glm_gam.scale) * 0.976 # this is = 0.106
assert_allclose(se, se_from_mgcv * bug_fact, rtol=0, atol=0.008)
@pytest.mark.matplotlib
def test_partial_plot():
# verify that plot and partial_values method agree
# the model only has one component so partial values is the same as
# fittedvalues
    # Generate a plot to visualize and analyze the result.
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.y.values
se_from_mgcv = data_from_r.y_est_se # noqa: F841
df = [10]
degree = [6]
bsplines = BSplines(x, degree=degree, df=df)
alpha = 0.03
glm_gam = GLMGam(y, smoother=bsplines, alpha=alpha)
res_glm_gam = glm_gam.fit(maxiter=10000, method='bfgs')
fig = res_glm_gam.plot_partial(0)
xp, yp = fig.axes[0].get_children()[0].get_data()
# Note xp and yp are sorted by x
sort_idx = np.argsort(x)
hat_y, se = res_glm_gam.partial_values(0)
# assert that main plot line is the prediction
assert_allclose(xp, x[sort_idx])
assert_allclose(yp, hat_y[sort_idx])
# Uncomment to visualize the plot
# import matplotlib.pyplot as plt
# res_glm_gam.plot_partial(0)
# plt.plot(x, y, '.')
# plt.show()
def test_cov_params():
np.random.seed(0)
n = 1000
x = np.random.uniform(0, 1, (n, 2))
x = x - x.mean()
y = x[:, 0] * x[:, 0] + np.random.normal(0, .01, n)
y -= y.mean()
bsplines = BSplines(x, degree=[3] * 2, df=[10] * 2, constraints='center')
alpha = [0, 0]
glm_gam = GLMGam(y, smoother=bsplines, alpha=alpha)
res_glm_gam = glm_gam.fit(method='pirls', max_start_irls=0,
disp=0, maxiter=5000)
glm = GLM(y, bsplines.basis)
res_glm = glm.fit()
assert_allclose(res_glm.cov_params(), res_glm_gam.cov_params(),
rtol=0.0025)
alpha = 1e-13
glm_gam = GLMGam(y, smoother=bsplines, alpha=alpha)
res_glm_gam = glm_gam.fit(method='pirls', max_start_irls=0,
disp=0, maxiter=5000)
assert_allclose(res_glm.cov_params(), res_glm_gam.cov_params(),
atol=1e-10)
res_glm_gam = glm_gam.fit(method='bfgs', max_start_irls=0,
disp=0, maxiter=5000, maxfun=5000)
assert_allclose(res_glm.cov_params(), res_glm_gam.cov_params(),
rtol=1e-4, atol=1e-8)
| bsd-3-clause |
GitYiheng/reinforcement_learning_test | test06_deep_reinforcement_learning/multipendulum_env.py | 1 | 13854 | import gym
import numpy as np
from gym import error, spaces, utils
from gym.utils import seeding
from numpy import array, linspace, deg2rad, zeros
from sympy import symbols
from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod
from scipy.integrate import odeint
from pydy.codegen.ode_function_generators import generate_ode_function
import matplotlib.pyplot as plt
class MultipendulumEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
#=======================#
# Parameters for step() #
#=======================#
# Maximum number of steps before episode termination
self.max_steps = 200
# For ODE integration
self.dt = .001 # Simultaion time step = 1ms
self.sim_steps = 51 # Number of simulation steps in 1 learning step
self.dt_step = np.linspace(0., self.dt*self.sim_steps, num=self.sim_steps) # Learning time step = 50ms
# Termination conditions for simulation
self.num_steps = 0 # Number of steps
self.done = False
# For visualisation
self.viewer = None
self.ax = False
# Constraints for observation
min_angle = -np.pi
max_angle = np.pi
min_omega = -10.
max_omega = 10.
min_torque = -10.
max_torque = 10.
low_state = np.array([min_angle, min_angle, min_angle, min_omega, min_omega, min_omega])
high_state = np.array([max_angle, max_angle, max_angle, max_omega, max_omega, max_omega])
low_action = np.array([min_torque, min_torque, min_torque])
high_action = np.array([max_torque, max_torque, max_torque])
self.action_space = spaces.Box(low=low_action, high=high_action)
self.observation_space = spaces.Box(low=low_state, high=high_state)
# Seed...
self.seed()
#==============#
# Orientations #
#==============#
self.theta1, self.theta2, self.theta3 = dynamicsymbols('theta1, theta2, theta3')
self.inertial_frame = ReferenceFrame('I')
self.lower_leg_frame = ReferenceFrame('L')
self.lower_leg_frame.orient(self.inertial_frame, 'Axis', (self.theta1, self.inertial_frame.z))
self.upper_leg_frame = ReferenceFrame('U')
self.upper_leg_frame.orient(self.lower_leg_frame, 'Axis', (self.theta2, self.lower_leg_frame.z))
self.torso_frame = ReferenceFrame('T')
self.torso_frame.orient(self.upper_leg_frame, 'Axis', (self.theta3, self.upper_leg_frame.z))
#=================#
# Point Locations #
#=================#
#--------#
# Joints #
#--------#
self.lower_leg_length, self.upper_leg_length = symbols('l_L, l_U')
self.ankle = Point('A')
self.knee = Point('K')
self.knee.set_pos(self.ankle, self.lower_leg_length * self.lower_leg_frame.y)
self.hip = Point('H')
self.hip.set_pos(self.knee, self.upper_leg_length * self.upper_leg_frame.y)
#--------------------------#
# Center of mass locations #
#--------------------------#
self.lower_leg_com_length, self.upper_leg_com_length, self.torso_com_length = symbols('d_L, d_U, d_T')
self.lower_leg_mass_center = Point('L_o')
self.lower_leg_mass_center.set_pos(self.ankle, self.lower_leg_com_length * self.lower_leg_frame.y)
self.upper_leg_mass_center = Point('U_o')
self.upper_leg_mass_center.set_pos(self.knee, self.upper_leg_com_length * self.upper_leg_frame.y)
self.torso_mass_center = Point('T_o')
self.torso_mass_center.set_pos(self.hip, self.torso_com_length * self.torso_frame.y)
#===========================================#
# Define kinematical differential equations #
#===========================================#
self.omega1, self.omega2, self.omega3 = dynamicsymbols('omega1, omega2, omega3')
self.time = symbols('t')
self.kinematical_differential_equations = [self.omega1 - self.theta1.diff(self.time),
self.omega2 - self.theta2.diff(self.time),
self.omega3 - self.theta3.diff(self.time)]
#====================#
# Angular Velocities #
#====================#
self.lower_leg_frame.set_ang_vel(self.inertial_frame, self.omega1 * self.inertial_frame.z)
self.upper_leg_frame.set_ang_vel(self.lower_leg_frame, self.omega2 * self.lower_leg_frame.z)
self.torso_frame.set_ang_vel(self.upper_leg_frame, self.omega3 * self.upper_leg_frame.z)
#===================#
# Linear Velocities #
#===================#
self.ankle.set_vel(self.inertial_frame, 0)
self.lower_leg_mass_center.v2pt_theory(self.ankle, self.inertial_frame, self.lower_leg_frame)
self.knee.v2pt_theory(self.ankle, self.inertial_frame, self.lower_leg_frame)
self.upper_leg_mass_center.v2pt_theory(self.knee, self.inertial_frame, self.upper_leg_frame)
self.hip.v2pt_theory(self.knee, self.inertial_frame, self.upper_leg_frame)
self.torso_mass_center.v2pt_theory(self.hip, self.inertial_frame, self.torso_frame)
#======#
# Mass #
#======#
self.lower_leg_mass, self.upper_leg_mass, self.torso_mass = symbols('m_L, m_U, m_T')
#=========#
# Inertia #
#=========#
self.lower_leg_inertia, self.upper_leg_inertia, self.torso_inertia = symbols('I_Lz, I_Uz, I_Tz')
self.lower_leg_inertia_dyadic = inertia(self.lower_leg_frame, 0, 0, self.lower_leg_inertia)
self.lower_leg_central_inertia = (self.lower_leg_inertia_dyadic, self.lower_leg_mass_center)
self.upper_leg_inertia_dyadic = inertia(self.upper_leg_frame, 0, 0, self.upper_leg_inertia)
self.upper_leg_central_inertia = (self.upper_leg_inertia_dyadic, self.upper_leg_mass_center)
self.torso_inertia_dyadic = inertia(self.torso_frame, 0, 0, self.torso_inertia)
self.torso_central_inertia = (self.torso_inertia_dyadic, self.torso_mass_center)
#==============#
# Rigid Bodies #
#==============#
self.lower_leg = RigidBody('Lower Leg', self.lower_leg_mass_center, self.lower_leg_frame,
self.lower_leg_mass, self.lower_leg_central_inertia)
self.upper_leg = RigidBody('Upper Leg', self.upper_leg_mass_center, self.upper_leg_frame,
self.upper_leg_mass, self.upper_leg_central_inertia)
self.torso = RigidBody('Torso', self.torso_mass_center, self.torso_frame,
self.torso_mass, self.torso_central_inertia)
#=========#
# Gravity #
#=========#
self.g = symbols('g')
self.lower_leg_grav_force = (self.lower_leg_mass_center,
-self.lower_leg_mass * self.g * self.inertial_frame.y)
self.upper_leg_grav_force = (self.upper_leg_mass_center,
-self.upper_leg_mass * self.g * self.inertial_frame.y)
self.torso_grav_force = (self.torso_mass_center, -self.torso_mass * self.g * self.inertial_frame.y)
#===============#
# Joint Torques #
#===============#
self.ankle_torque, self.knee_torque, self.hip_torque = dynamicsymbols('T_a, T_k, T_h')
self.lower_leg_torque = (self.lower_leg_frame,
self.ankle_torque * self.inertial_frame.z - self.knee_torque *
self.inertial_frame.z)
self.upper_leg_torque = (self.upper_leg_frame,
self.knee_torque * self.inertial_frame.z - self.hip_torque *
self.inertial_frame.z)
self.torso_torque = (self.torso_frame, self.hip_torque * self.inertial_frame.z)
#=====================#
# Equations of Motion #
#=====================#
self.coordinates = [self.theta1, self.theta2, self.theta3]
self.speeds = [self.omega1, self.omega2, self.omega3]
self.kane = KanesMethod(self.inertial_frame,
self.coordinates,
self.speeds,
self.kinematical_differential_equations)
self.loads = [self.lower_leg_grav_force,
self.upper_leg_grav_force,
self.torso_grav_force,
self.lower_leg_torque,
self.upper_leg_torque,
self.torso_torque]
self.bodies = [self.lower_leg, self.upper_leg, self.torso]
self.fr, self.frstar = self.kane.kanes_equations(self.bodies, self.loads)
self.mass_matrix = self.kane.mass_matrix_full
self.forcing_vector = self.kane.forcing_full
#=============================#
# List the symbolic arguments #
#=============================#
#-----------#
# Constants #
#-----------#
self.constants = [self.lower_leg_length,
self.lower_leg_com_length,
self.lower_leg_mass,
self.lower_leg_inertia,
self.upper_leg_length,
self.upper_leg_com_length,
self.upper_leg_mass,
self.upper_leg_inertia,
self.torso_com_length,
self.torso_mass,
self.torso_inertia,
self.g]
#--------------#
# Time Varying #
#--------------#
self.coordinates = [self.theta1, self.theta2, self.theta3]
self.speeds = [self.omega1, self.omega2, self.omega3]
self.specified = [self.ankle_torque, self.knee_torque, self.hip_torque]
#=======================#
# Generate RHS Function #
#=======================#
self.right_hand_side = generate_ode_function(self.forcing_vector, self.coordinates, self.speeds,
self.constants, mass_matrix=self.mass_matrix,
specifieds=self.specified)
#==============================#
# Specify Numerical Quantities #
#==============================#
self.x = zeros(6)
self.x[:3] = deg2rad(2.0)
# taken from male1.txt in yeadon (maybe I should use the values in Winters).
# self.numerical_constants = array([0.611, # lower_leg_length [m]
# 0.387, # lower_leg_com_length [m]
# 6.769, # lower_leg_mass [kg]
# 0.101, # lower_leg_inertia [kg*m^2]
# 0.424, # upper_leg_length [m]
# 0.193, # upper_leg_com_length
# 17.01, # upper_leg_mass [kg]
# 0.282, # upper_leg_inertia [kg*m^2]
# 0.305, # torso_com_length [m]
# 32.44, # torso_mass [kg]
# 1.485, # torso_inertia [kg*m^2]
# 9.81]) # acceleration due to gravity [m/s^2]
self.numerical_constants = array([0.500, # lower_leg_length [m]
0.250, # lower_leg_com_length [m]
0.500, # lower_leg_mass [kg]
0.03125, # lower_leg_inertia [kg*m^2]
0.500, # upper_leg_length [m]
0.250, # upper_leg_com_length
0.500, # upper_leg_mass [kg]
0.03125, # upper_leg_inertia [kg*m^2]
0.250, # torso_com_length [m]
0.500, # torso_mass [kg]
0.03125, # torso_inertia [kg*m^2]
9.81]) # acceleration due to gravity [m/s^2]
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
# self.x = zeros(6)
# self.x[:3] = deg2rad(2.0)
self.num_steps = 0
self.done = False
self.x = np.random.randn(6)
self.x[:3] += np.array([np.pi, np.pi, np.pi])
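        # Start each episode with joint angles near pi (far from the zero-angle
        # configuration favoured by the reward in step()) plus standard-normal
        # noise on all six state variables.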
return self._get_obs()
def _get_obs(self):
return self.x
def sample_action(self):
return np.random.randn(3)
def step(self, action):
        if self.done or self.num_steps > self.max_steps:
self.done = True
# Normalised reward
reward = 0.
# Unnormalised reward
# reward = -60.
return self.x, reward, self.done, {}
else:
# Increment the step counter
self.num_steps += 1
# Simulation
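            # Integrate the dynamics over one learning step (a grid of
            # self.sim_steps points spanning self.dt * self.sim_steps seconds)
            # and keep only the final state.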
self.x = odeint(self.right_hand_side, self.x, self.dt_step,
args=(action, self.numerical_constants))[-1]
# Normalise joint angles to -pi ~ pi
self.x[:3] = self.angle_normalise(self.x[:3])
# Normalise the reward to 0. ~ 1.
# Max reward: 0. -> 1.
# Min reward: -59.90881320326807 -> 0.
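            # (59.9088... = 3*pi**2 + 3*0.1*10**2 + 3*0.001*10**2, the worst case
            # under the angle/velocity/torque limits above; 60 is a round upper bound.)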
reward_unnormed = 60. - (self.x[0] ** 2 + self.x[1] ** 2 + self.x[2] ** 2 + .1 * self.x[3] ** 2 + .1 * self.x[4] ** 2 + .1 * self.x[5] ** 2 + .001 * action[0] ** 2 + .001 * action[1] ** 2 + .001 * action[2] ** 2)
reward = reward_unnormed / 60.
# Unnormalized reward
# reward = - (self.x[0] ** 2 + self.x[1] ** 2 + self.x[2] ** 2 + .1 * self.x[3] ** 2 + .1 * self.x[4] ** 2 + .1 * self.x[5] ** 2 + .001 * action[0] ** 2 + .001 * action[1] ** 2 + .001 * action[2] ** 2)
return self.x, reward, self.done, {}
def angle_normalise(self, angle_input):
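        # Wrap angles into [-pi, pi) so that equivalent joint configurations map
        # to the same observation.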
return (((angle_input+np.pi) % (2*np.pi)) - np.pi)
def render(self, mode='human'):
if not self.ax:
fig, ax = plt.subplots()
ax.set_xlim([-5, 5])
ax.set_ylim([-5, 5])
ax.set_aspect('equal')
self.ax = ax
else:
self.ax.clear()
self.ax.set_xlim([-5, 5])
self.ax.set_ylim([-5, 5])
self.ax.set_aspect('equal')
x0 = 0.
y0 = 0.
x1 = x0 + np.cos(self.x[0]+np.pi/2.)
y1 = y0 + np.sin(self.x[0]+np.pi/2.)
x2 = x1 + np.cos(self.x[1]+np.pi/2.)
y2 = y1 + np.sin(self.x[1]+np.pi/2.)
x3 = x2 + np.cos(self.x[2]+np.pi/2.)
y3 = y2 + np.sin(self.x[2]+np.pi/2.)
plt.plot([x0, x1, x2, x3], [y0, y1, y2, y3])
        plt.pause(0.01)
| mit |
hsiaoyi0504/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
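    # List the directory contents three entries per line to keep the error
    # message below compact.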
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
bsipocz/statsmodels | statsmodels/sandbox/examples/ex_cusum.py | 33 | 3219 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 02 11:41:25 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats
from numpy.testing import assert_almost_equal
import statsmodels.api as sm
from statsmodels.sandbox.regression.onewaygls import OneWayLS
from statsmodels.stats.diagnostic import recursive_olsresiduals
from statsmodels.sandbox.stats.diagnostic import _recursive_olsresiduals2 as recursive_olsresiduals2
#examples from ex_onewaygls.py
#choose example
#--------------
example = ['null', 'smalldiff', 'mediumdiff', 'largediff'][1]
example_size = [20, 100][1]
example_groups = ['2', '2-2'][1]
#'2-2': 4 groups,
# groups 0 and 1 and groups 2 and 3 have identical parameters in DGP
#generate example
#----------------
#np.random.seed(87654589)
nobs = example_size
x1 = 0.1+np.random.randn(nobs)
y1 = 10 + 15*x1 + 2*np.random.randn(nobs)
x1 = sm.add_constant(x1, prepend=False)
#assert_almost_equal(x1, np.vander(x1[:,0],2), 16)
#res1 = sm.OLS(y1, x1).fit()
#print res1.params
#print np.polyfit(x1[:,0], y1, 1)
#assert_almost_equal(res1.params, np.polyfit(x1[:,0], y1, 1), 14)
#print res1.summary(xname=['x1','const1'])
#regression 2
x2 = 0.1+np.random.randn(nobs)
if example == 'null':
y2 = 10 + 15*x2 + 2*np.random.randn(nobs) # if H0 is true
elif example == 'smalldiff':
y2 = 11 + 16*x2 + 2*np.random.randn(nobs)
elif example == 'mediumdiff':
y2 = 12 + 16*x2 + 2*np.random.randn(nobs)
else:
y2 = 19 + 17*x2 + 2*np.random.randn(nobs)
x2 = sm.add_constant(x2, prepend=False)
# stack
x = np.concatenate((x1,x2),0)
y = np.concatenate((y1,y2))
if example_groups == '2':
groupind = (np.arange(2*nobs)>nobs-1).astype(int)
else:
groupind = np.mod(np.arange(2*nobs),4)
groupind.sort()
#x = np.column_stack((x,x*groupind[:,None]))
res1 = sm.OLS(y, x).fit()
skip = 8
rresid, rparams, rypred, rresid_standardized, rresid_scaled, rcusum, rcusumci = \
recursive_olsresiduals(res1, skip)
print(rcusum)
print(rresid_scaled[skip-1:])
assert_almost_equal(rparams[-1], res1.params)
import matplotlib.pyplot as plt
plt.plot(rcusum)
plt.plot(rcusumci[0])
plt.plot(rcusumci[1])
plt.figure()
plt.plot(rresid)
plt.plot(np.abs(rresid))
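# The OLS-CUSUM test rejects parameter stability if the cumulative-sum path
# leaves the confidence band at any point, which is what the check below reports.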
print('cusum test reject:')
print(((rcusum[1:]>rcusumci[1])|(rcusum[1:]<rcusumci[0])).any())
rresid2, rparams2, rypred2, rresid_standardized2, rresid_scaled2, rcusum2, rcusumci2 = \
recursive_olsresiduals2(res1, skip)
#assert_almost_equal(rparams[skip+1:], rparams2[skip:-1],13)
assert_almost_equal(rparams[skip:], rparams2[skip:],13)
#np.c_[rparams[skip+1:], rparams2[skip:-1]]
#plt.show()
#################### Example break test
#import statsmodels.sandbox.tools.stattools
from statsmodels.sandbox.stats.diagnostic import breaks_hansen, \
breaks_cusumolsresid#, breaks_cusum
H, crit95, ft, s = breaks_hansen(res1)
print(H)
print(crit95)
supb, pval, crit = breaks_cusumolsresid(res1.resid)
print(supb, pval, crit)
##check whether this works directly: Ploberger/Kramer framing of standard cusum
##no, it's different, there is another denominator
#print breaks_cusumolsresid(rresid[skip:])
#this function is still completely wrong, cut and paste doesn't apply
#print breaks_cusum(rresid[skip:])
| bsd-3-clause |
saiwing-yeung/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
TomAugspurger/pandas | asv_bench/benchmarks/tslibs/timedelta.py | 8 | 1594 | """
Timedelta benchmarks that rely only on tslibs. See benchmarks.timedeltas for
Timedelta benchmarks that rely on other parts of pandas.
"""
import datetime
import numpy as np
from pandas import Timedelta
class TimedeltaConstructor:
def setup(self):
self.nptimedelta64 = np.timedelta64(3600)
self.dttimedelta = datetime.timedelta(seconds=3600)
self.td = Timedelta(3600, unit="s")
def time_from_int(self):
Timedelta(123456789)
def time_from_unit(self):
Timedelta(1, unit="d")
def time_from_components(self):
Timedelta(
days=1,
hours=2,
minutes=3,
seconds=4,
milliseconds=5,
microseconds=6,
nanoseconds=7,
)
def time_from_datetime_timedelta(self):
Timedelta(self.dttimedelta)
def time_from_np_timedelta(self):
Timedelta(self.nptimedelta64)
def time_from_string(self):
Timedelta("1 days")
def time_from_iso_format(self):
Timedelta("P4DT12H30M5S")
def time_from_missing(self):
Timedelta("nat")
def time_from_pd_timedelta(self):
Timedelta(self.td)
class TimedeltaProperties:
def setup_cache(self):
td = Timedelta(days=365, minutes=35, seconds=25, milliseconds=35)
return td
def time_timedelta_days(self, td):
td.days
def time_timedelta_seconds(self, td):
td.seconds
def time_timedelta_microseconds(self, td):
td.microseconds
def time_timedelta_nanoseconds(self, td):
td.nanoseconds
| bsd-3-clause |
spennihana/h2o-3 | py2/testdir_single_jvm/test_GLM_hastie_shuffle.py | 20 | 5925 | import unittest, time, sys, random, copy
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_jobs, h2o_glm, h2o_util
from h2o_test import verboseprint, dump_json, OutputObj
# Dataset created from this:
# Elements of Statistical Learning 2nd Ed.; Hastie, Tibshirani, Friedman; Feb 2011
# example 10.2 page 357
# Ten features, standard independent Gaussian. Target y is:
# y[i] = 1 if sum(X[i]) > .34 else -1
# 9.34 is the median of a chi-squared random variable with 10 degrees of freedom
# (sum of squares of 10 standard Gaussians)
# http://www.stanford.edu/~hastie/local.ftp/Springer/ESLII_print5.pdf
# from sklearn.datasets import make_hastie_10_2
# import numpy as np
# i = 1000000
# f = 10
# (X,y) = make_hastie_10_2(n_samples=i,random_state=None)
# y.shape = (i,1)
# Y = np.hstack((X,y))
# np.savetxt('./1mx' + str(f) + '_hastie_10_2.data', Y, delimiter=',', fmt='%.2f');
def glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30):
print "\nStarting GLM of", csvFilename
# we can force a col type to enum now? with param columnTypes
# "Numeric"
# make the last column enum
# Instead of string for parse, make this a dictionary, with column index, value
# that's used for updating the ColumnTypes array before making it a string for parse
columnTypeDict = {10: 'Enum'}
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, columnTypeDict=columnTypeDict,
hex_key=csvFilename + ".hex", schema='put', timeoutSecs=30)
pA = h2o_cmd.ParseObj(parseResult)
iA = h2o_cmd.InspectObj(pA.parse_key)
parse_key = pA.parse_key
numRows = iA.numRows
numCols = iA.numCols
labelList = iA.labelList
for i in range(10):
print "Summary on column", i
# FIX! how come only 0 works here for column
co = h2o_cmd.runSummary(key=parse_key, column=i)
for k,v in co:
print k, v
expected = []
allowedDelta = 0
labelListUsed = list(labelList)
labelListUsed.remove('C11')
numColsUsed = numCols - 1
parameters = {
'validation_frame': parse_key,
'ignored_columns': None,
# FIX! for now just use a column that's binomial
'response_column': 'C11',
# FIX! when is this needed? redundant for binomial?
'balance_classes': False,
'max_after_balance_size': None,
'standardize': False,
'family': 'binomial',
'link': None,
'alpha': '[1e-4]',
'lambda': '[0.5,0.25, 0.1]',
'lambda_search': None,
'nlambdas': None,
'lambda_min_ratio': None,
# 'use_all_factor_levels': False,
}
start = time.time()
model_key = 'hastie_glm.hex'
bmResult = h2o.n0.build_model(
algo='glm',
model_id=model_key,
training_frame=parse_key,
parameters=parameters,
timeoutSecs=60)
bm = OutputObj(bmResult, 'bm')
modelResult = h2o.n0.models(key=model_key)
model = OutputObj(modelResult['models'][0]['output'], 'model')
h2o_glm.simpleCheckGLM(self, model, parameters, labelList, labelListUsed)
cmmResult = h2o.n0.compute_model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
cmm = OutputObj(cmmResult, 'cmm')
mmResult = h2o.n0.model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
mm = OutputObj(mmResult, 'mm')
prResult = h2o.n0.predict(model=model_key, frame=parse_key, timeoutSecs=60)
pr = OutputObj(prResult['model_metrics'][0]['predictions'], 'pr')
# compare this glm to the first one. since the files are replications, the results
# should be similar?
if self.validation1:
h2o_glm.compareToFirstGlm(self, 'AUC', validation, self.validation1)
else:
# self.validation1 = copy.deepcopy(validation)
self.validation1 = None
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
validation1 = {}
def test_GLM_hastie_shuffle(self):
# gunzip it and cat it to create 2x and 4x replications in SYNDATASETS_DIR
# FIX! eventually we'll compare the 1x, 2x and 4x results like we do
# in other tests. (catdata?)
# This test also adds file shuffling, to see that row order doesn't matter
csvFilename = "1mx10_hastie_10_2.data.gz"
bucket = 'home-0xdiag-datasets'
csvPathname = 'standard' + '/' + csvFilename
fullPathname = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30)
filename1x = "hastie_1x.data"
pathname1x = SYNDATASETS_DIR + '/' + filename1x
h2o_util.file_gunzip(fullPathname, pathname1x)
filename1xShuf = "hastie_1x.data_shuf"
pathname1xShuf = SYNDATASETS_DIR + '/' + filename1xShuf
h2o_util.file_shuffle(pathname1x, pathname1xShuf)
filename2x = "hastie_2x.data"
pathname2x = SYNDATASETS_DIR + '/' + filename2x
h2o_util.file_cat(pathname1xShuf, pathname1xShuf, pathname2x)
filename2xShuf = "hastie_2x.data_shuf"
pathname2xShuf = SYNDATASETS_DIR + '/' + filename2xShuf
h2o_util.file_shuffle(pathname2x, pathname2xShuf)
glm_doit(self, filename2xShuf, None, pathname2xShuf, timeoutSecs=45)
# too big to shuffle?
filename4x = "hastie_4x.data"
pathname4x = SYNDATASETS_DIR + '/' + filename4x
h2o_util.file_cat(pathname2xShuf,pathname2xShuf,pathname4x)
glm_doit(self,filename4x, None, pathname4x, timeoutSecs=120)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |