repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
tjhei/burnman-original | burnman/slb.py | 2 | 8946 | # BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
import scipy.optimize as opt
import birch_murnaghan as bm
import debye
import numpy as np
from equation_of_state import equation_of_state
import warnings
import matplotlib.pyplot as plt
class slb_base(equation_of_state):
"""
    Base class for the finite-strain Mie-Grueneisen-Debye equation of state detailed
    in Stixrude and Lithgow-Bertelloni (2005). For the most part, the equations are
    all third order in strain, but see the slb2 and slb3 classes for the
    second- and third-order variants.
"""
def __debye_temperature(self,x,params):
"""
Finite strain approximation for Debye Temperature [K]
x = ref_vol/vol
"""
f = 1./2. * (pow(x, 2./3.) - 1.)
a1_ii = 6. * params['grueneisen_0'] # EQ 47
a2_iikk = -12.*params['grueneisen_0']+36.*pow(params['grueneisen_0'],2.) - 18.*params['q_0']*params['grueneisen_0'] # EQ 47
return params['Debye_0'] * np.sqrt(1. + a1_ii * f + 1./2. * a2_iikk*f*f)
def volume_dependent_q(self, x, params):
"""
Finite strain approximation for q, the isotropic volume strain
derivative of the grueneisen parameter
"""
f = 1./2. * (pow(x, 2./3.) - 1.)
a1_ii = 6. * params['grueneisen_0'] # EQ 47
a2_iikk = -12.*params['grueneisen_0']+36.*pow(params['grueneisen_0'],2.) - 18.*params['q_0']*params['grueneisen_0'] # EQ 47
nu_o_nu0_sq = 1.+ a1_ii*f + (1./2.)*a2_iikk * f*f # EQ 41
gr = 1./6./nu_o_nu0_sq * (2.*f+1.) * ( a1_ii + a2_iikk*f )
q = 1./9.*(18.*gr - 6. - 1./2. / nu_o_nu0_sq * (2.*f+1.)*(2.*f+1.)*a2_iikk/gr)
return q
def __isotropic_eta_s(self, x, params):
"""
Finite strain approximation for eta_s_0, the isotropic shear
strain derivative of the grueneisen parameter
"""
f = 1./2. * (pow(x, 2./3.) - 1.)
a2_s = -2.*params['grueneisen_0'] - 2.*params['eta_s_0'] # EQ 47
a1_ii = 6. * params['grueneisen_0'] # EQ 47
a2_iikk = -12.*params['grueneisen_0']+36.*pow(params['grueneisen_0'],2.) - 18.*params['q_0']*params['grueneisen_0'] # EQ 47
nu_o_nu0_sq = 1.+ a1_ii*f + (1./2.)*a2_iikk * pow(f,2.) # EQ 41
gr = 1./6./nu_o_nu0_sq * (2.*f+1.) * ( a1_ii + a2_iikk*f )
eta_s = - gr - (1./2. * pow(nu_o_nu0_sq,-1.) * pow((2.*f)+1.,2.)*a2_s) # EQ 46 NOTE the typo from Stixrude 2005
return eta_s
def volume(self, pressure, temperature, params):
"""
Returns molar volume at the pressure and temperature [m^3]
"""
debye_T = lambda x : self.__debye_temperature(params['V_0']/x, params)
gr = lambda x : self.grueneisen_parameter(pressure, temperature, x, params)
E_th = lambda x : debye.thermal_energy(temperature, debye_T(x), params['n']) #thermal energy at temperature T
E_th_ref = lambda x : debye.thermal_energy(300., debye_T(x), params['n']) #thermal energy at reference temperature
b_iikk= 9.*params['K_0'] # EQ 28
b_iikkmm= 27.*params['K_0']*(params['Kprime_0']-4.) # EQ 29
f = lambda x: 0.5*(pow(params['V_0']/x,2./3.)-1.) # EQ 24
func = lambda x: (1./3.)*(pow(1.+2.*f(x),5./2.))*((b_iikk*f(x)) \
+(0.5*b_iikkmm*pow(f(x),2.))) + gr(x)*(E_th(x) - E_th_ref(x))/x - pressure #EQ 21
# we need to have a sign change in [a,b] to find a zero. Let us start with a
# conservative guess:
a = 0.6*params['V_0']
b = 1.2*params['V_0']
# if we have a sign change, we are done:
if func(a)*func(b)<0:
return opt.brentq(func, a, b)
else:
tol = 0.0001
sol = opt.fmin(lambda x : func(x)*func(x), 1.0*params['V_0'], ftol=tol, full_output=1, disp=0)
if sol[1] > tol*2:
raise ValueError('Cannot find volume, likely outside of the range of validity for EOS')
else:
warnings.warn("May be outside the range of validity for EOS")
return sol[0]
def grueneisen_parameter(self, pressure, temperature, volume, params):
"""
Returns grueneisen parameter at the pressure, temperature, and volume
"""
x = params['V_0'] / volume
f = 1./2. * (pow(x, 2./3.) - 1.)
gruen_0 = params['grueneisen_0']
a1_ii = 6. * gruen_0 # EQ 47
a2_iikk = -12.*gruen_0 + 36.*gruen_0*gruen_0 - 18.*params['q_0']*gruen_0 # EQ 47
nu_o_nu0_sq = 1.+ a1_ii*f + (1./2.)*a2_iikk * f*f # EQ 41
return 1./6./nu_o_nu0_sq * (2.*f+1.) * ( a1_ii + a2_iikk*f )
def isothermal_bulk_modulus(self, pressure,temperature, volume, params):
"""
Returns isothermal bulk modulus at the pressure, temperature, and volume [Pa]
"""
debye_T = self.__debye_temperature(params['V_0']/volume, params)
gr = self.grueneisen_parameter(pressure, temperature, volume, params)
E_th = debye.thermal_energy(temperature, debye_T, params['n']) #thermal energy at temperature T
E_th_ref = debye.thermal_energy(300.,debye_T, params['n']) #thermal energy at reference temperature
C_v = debye.heat_capacity_v(temperature, debye_T, params['n']) #heat capacity at temperature T
C_v_ref = debye.heat_capacity_v(300.,debye_T, params['n']) #heat capacity at reference temperature
q = self.volume_dependent_q(params['V_0']/volume, params)
K = bm.bulk_modulus(volume, params) \
+ (gr + 1.-q)* ( gr / volume ) * (E_th - E_th_ref) \
- ( pow(gr , 2.) / volume )*(C_v*temperature - C_v_ref*300.)
return K
def adiabatic_bulk_modulus(self, pressure, temperature, volume, params):
"""
Returns adiabatic bulk modulus at the pressure, temperature, and volume [Pa]
"""
K_T=self.isothermal_bulk_modulus(pressure, temperature, volume, params)
alpha = self.thermal_expansivity(pressure, temperature, volume, params)
gr = self.grueneisen_parameter(pressure, temperature, volume, params)
K_S = K_T*(1. + gr * alpha * temperature)
return K_S
def shear_modulus(self, pressure, temperature, volume, params):
"""
Returns shear modulus at the pressure, temperature, and volume [Pa]
"""
debye_T = self.__debye_temperature(params['V_0']/volume, params)
eta_s = self.__isotropic_eta_s(params['V_0']/volume, params)
E_th = debye.thermal_energy(temperature ,debye_T, params['n'])
E_th_ref = debye.thermal_energy(300.,debye_T, params['n'])
if self.order==2:
return bm.shear_modulus_second_order(volume, params) - eta_s * (E_th-E_th_ref) / volume
elif self.order==3:
return bm.shear_modulus_third_order(volume, params) - eta_s * (E_th-E_th_ref) / volume
else:
raise NotImplementedError("")
def heat_capacity_v(self, pressure, temperature, volume, params):
"""
Returns heat capacity at constant volume at the pressure, temperature, and volume [J/K/mol]
"""
debye_T = self.__debye_temperature(params['V_0']/volume, params)
return debye.heat_capacity_v(temperature, debye_T,params['n'])
def heat_capacity_p(self, pressure, temperature, volume, params):
"""
Returns heat capacity at constant pressure at the pressure, temperature, and volume [J/K/mol]
"""
alpha = self.thermal_expansivity(pressure, temperature, volume, params)
gr = self.grueneisen_parameter(pressure, temperature, volume, params)
C_v = self.heat_capacity_v(pressure, temperature, volume, params)
C_p = C_v*(1. + gr * alpha * temperature)
return C_p
def thermal_expansivity(self, pressure, temperature, volume, params):
"""
Returns thermal expansivity at the pressure, temperature, and volume [1/K]
"""
C_v = self.heat_capacity_v(pressure, temperature, volume, params)
gr = self.grueneisen_parameter(pressure, temperature, volume, params)
K = self.isothermal_bulk_modulus(pressure, temperature, volume, params)
alpha = gr * C_v / K / volume
return alpha
class slb3(slb_base):
"""
    SLB equation of state with third order finite strain expansion for the
    shear modulus (this should be preferred, as it is more thermodynamically
    consistent).
"""
def __init__(self):
self.order=3
class slb2(slb_base):
"""
SLB equation of state with second order finite strain expansion for the
shear modulus. In general, this should not be used, but sometimes
shear modulus data is fit to a second order equation of state. In that
case, you should use this. The moral is, be careful!
"""
def __init__(self):
self.order=2
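# ---------------------------------------------------------------------------
# Illustrative usage sketch (added by the editor; not part of the original
# BurnMan module). The numbers below are rough, made-up values of the right
# order of magnitude for a lower-mantle silicate; only the dictionary keys
# are taken from the methods above.
if __name__ == "__main__":
    example_params = {
        'V_0': 24.45e-6,       # reference molar volume [m^3/mol] (illustrative)
        'K_0': 251.e9,         # reference bulk modulus [Pa] (illustrative)
        'Kprime_0': 4.1,       # pressure derivative of K_0 (illustrative)
        'Debye_0': 905.,       # reference Debye temperature [K] (illustrative)
        'grueneisen_0': 1.57,  # reference Grueneisen parameter (illustrative)
        'q_0': 1.1,            # volume dependence of the Grueneisen parameter (illustrative)
        'n': 5.,               # atoms per formula unit (illustrative)
    }
    eos = slb3()
    pressure, temperature = 25.e9, 2000.  # 25 GPa, 2000 K
    volume = eos.volume(pressure, temperature, example_params)
    print('V   = %g m^3/mol' % volume)
    print('gr  = %g' % eos.grueneisen_parameter(pressure, temperature, volume, example_params))
    print('K_T = %g Pa' % eos.isothermal_bulk_modulus(pressure, temperature, volume, example_params))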
| gpl-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/computation/tests/test_compat.py | 9 | 2001 | #!/usr/bin/env python
# flake8: noqa
import nose
from itertools import product
from distutils.version import LooseVersion
import pandas as pd
from pandas.util import testing as tm
from pandas.computation.engines import _engines
import pandas.computation.expr as expr
ENGINES_PARSERS = list(product(_engines, expr._parsers))
def test_compat():
    # test we have compat with our version of numexpr
from pandas.computation import _NUMEXPR_INSTALLED
try:
import numexpr as ne
ver = ne.__version__
if ver == LooseVersion('2.4.4'):
assert not _NUMEXPR_INSTALLED
elif ver < LooseVersion('2.1'):
with tm.assert_produces_warning(UserWarning,
check_stacklevel=False):
assert not _NUMEXPR_INSTALLED
else:
assert _NUMEXPR_INSTALLED
except ImportError:
raise nose.SkipTest("not testing numexpr version compat")
def test_invalid_numexpr_version():
for engine, parser in ENGINES_PARSERS:
yield check_invalid_numexpr_version, engine, parser
def check_invalid_numexpr_version(engine, parser):
def testit():
a, b = 1, 2
res = pd.eval('a + b', engine=engine, parser=parser)
tm.assert_equal(res, 3)
if engine == 'numexpr':
try:
import numexpr as ne
except ImportError:
raise nose.SkipTest("no numexpr")
else:
if ne.__version__ < LooseVersion('2.1'):
with tm.assertRaisesRegexp(ImportError, "'numexpr' version is "
".+, must be >= 2.1"):
testit()
elif ne.__version__ == LooseVersion('2.4.4'):
raise nose.SkipTest("numexpr version==2.4.4")
else:
testit()
else:
testit()
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
BBN-Q/PySimulator | tests/SimSpeedTest.py | 1 | 5046 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 5 21:44:22 2012
@author: cryan
"""
import numpy as np
from numpy import sin, cos
from scipy.constants import pi
from scipy.linalg import expm, eigh
from PySim.SystemParams import SystemParams
from PySim.PulseSequence import PulseSequence
from PySim.Simulation import simulate_sequence_stack, simulate_sequence
from PySim.QuantumSystems import SCQubit, Hamiltonian, Dissipator
from numba import *
#import matplotlib.pyplot as plt
#from timeit import timeit
#Try to load the CPPBackEnd
try:
import PySim.CySim
CPPBackEnd = True
except ImportError:
CPPBackEnd = False
#@jit(c16[:,:](c16[:,:], c16))
def expm_eigen(matIn, mult):
'''
Helper function to compute matrix exponential of Hermitian matrix
'''
dim = matIn.shape[0]
D, V = eigh(matIn)
return V.dot(np.diag(np.exp(mult*D))).dot(V.conj().T)
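# (Added) Small self-check, not part of the original benchmark: for a Hermitian
# matrix, the eigendecomposition-based exponential above should agree with
# scipy's general-purpose expm. Defined as a helper, never called here.
def _check_expm_eigen():
    testMat = np.array([[1.0, 0.5], [0.5, -1.0]], dtype=np.complex128)
    return np.allclose(expm_eigen(testMat, -0.1j), expm(-0.1j*testMat))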
@autojit()
def mult_a_X(alpha, X):
outArray = np.zeros_like(X)
for rowct in range(X.shape[0]):
for colct in range(X.shape[1]):
outArray[rowct,colct] = alpha*X[rowct, colct]
return outArray
@jit(c16[:,:](c16[:,:], c16[:,:,:], f8[:,:], f8[:]))
#@autojit
def evolution_numba(Hnat, controlHams, controlFields, controlFreqs):
timeStep = 0.01
curTime = 0.0
Uprop = np.eye(Hnat.shape[0])
for timect in range(controlFields.shape[1]):
tmpH = np.copy(Hnat)
for controlct in range(controlFields.shape[0]):
tmpMult = controlFields[controlct, timect]*cos(2*pi*curTime*controlFreqs[controlct])
for rowct in range(tmpH.shape[0]):
for colct in range(tmpH.shape[1]):
tmpH[rowct,colct] += tmpMult*controlHams[controlct, rowct, colct]
        Uprop = np.dot(expm_eigen(tmpH,-1j*2*pi*timeStep),Uprop)
curTime += timeStep
return Uprop
def evolution_numpy(Hnat, controlHams, controlFields, controlFreqs):
timeStep = 0.01
curTime = 0.0
Uprop = np.eye(Hnat.shape[0])
for timect in range(controlFields.shape[1]):
tmpH = np.copy(Hnat)
for controlct in range(controlFields.shape[0]):
tmpH += controlFields[controlct, timect]*cos(2*pi*curTime*controlFreqs[controlct])*controlHams[controlct]
        Uprop = np.dot(expm_eigen(tmpH,-1j*2*pi*timeStep),Uprop)
curTime += timeStep
return Uprop
def sim_setup(dimension, numTimeSteps, numControls):
#Create a random natural hamiltonian
tmpMat = np.random.randn(dimension, dimension) + 1j*np.random.randn(dimension, dimension)
Hnat = tmpMat+tmpMat.conj().T
#Create random control Hamiltonians
controlHams = np.zeros((numControls,dimension, dimension), dtype=np.complex128)
for ct in range(numControls):
tmpMat = np.random.randn(dimension, dimension) + 1j*np.random.randn(dimension, dimension)
controlHams[ct] = tmpMat+tmpMat.conj().T
#Create random controlfields
controlFields = np.random.randn(numControls, numTimeSteps)
#Control frequencies
controlFreqs = np.random.randn(numControls)
return Hnat, controlHams, controlFields, controlFreqs
def sim_setup_cython(Hnat, controlHams, controlFields, controlFreqs):
systemParams = SystemParams()
systemParams.Hnat = Hamiltonian(Hnat)
pulseSeq = PulseSequence()
pulseSeq.controlAmps = controlFields
for ct in range(len(controlHams)):
systemParams.add_control_ham(inphase=Hamiltonian(controlHams[ct]))
pulseSeq.add_control_line(freq = controlFreqs[ct], phase=0, controlType='sinusoidal')
for ct in range(np.int(np.log2(Hnat.shape[0]))):
systemParams.add_sub_system(SCQubit(2,0e9, name='Q1', T1=1e-6))
pulseSeq.timeSteps = 0.01*np.ones(controlFields.shape[1])
pulseSeq.maxTimeStep = 1e6
return systemParams, pulseSeq
if __name__ == '__main__':
dims = 2**np.arange(1,6)
dim = 16
Hnat, controlHams, controlFields, controlFreqs = sim_setup(dim, 1000, 4)
print(evolution_numba(Hnat, controlHams, controlFields, controlFreqs))
# systemParams, pulseSeq = sim_setup_cython(Hnat, controlHams, controlFields, controlFreqs)
# cythonTimes = []
# numpyTimes = []
# for dim in dims:
# print(dim)
# Hnat, controlHams, controlFields, controlFreqs = sim_setup(dim, 2000, 4)
# systemParams, pulseSeq = sim_setup_cython(Hnat, controlHams, controlFields, controlFreqs)
# numpyTimes.append(timeit('evolution_numpy(Hnat, controlHams, controlFields, controlFreqs)',
# setup='from __main__ import evolution_numpy, Hnat, controlHams, controlFields, controlFreqs', number=3)/3)
# cythonTimes.append(timeit('simulate_sequence(pulseSeq, systemParams)', setup='from __main__ import simulate_sequence, pulseSeq, systemParams', number=3)/3)
#
# plt.plot(dims, numpyTimes)
# plt.plot(dims, cythonTimes)
# plt.legend(('Numpy', 'Cython'))
# plt.xlabel('System Dimension')
# plt.show()
| apache-2.0 |
yunque/sms-tools | lectures/06-Harmonic-model/plots-code/monophonic-polyphonic.py | 21 | 2258 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import sineModel as SM
import stft as STFT
import utilFunctions as UF
plt.figure(1, figsize=(9, 6))
plt.subplot(211)
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/carnatic.wav'))
x1 = x[int(4.35*fs):]
w = np.blackman(1301)
N = 2048
H = 250
t = -70
minSineDur = .02
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
maxplotfreq = 3000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sine frequencies (carnatic.wav)')
plt.subplot(212)
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/vignesh.wav'))
w = np.blackman(1101)
N = 2048
H = 250
t = -90
minSineDur = .1
maxnSines = 200
freqDevOffset = 20
freqDevSlope = 0.02
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
maxplotfreq = 3000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sine frequencies (vignesh.wav)')
plt.tight_layout()
plt.savefig('monophonic-polyphonic.png')
plt.show()
| agpl-3.0 |
nansencenter/nansat | nansat/tests/test_pointbrowser.py | 1 | 3888 | # ------------------------------------------------------------------------------
# Name: test_pointbrowser.py
# Purpose: Test the PointBrowser class
#
# Author: Aleksander Vines, Anton Korosov
#
# Created: 2015-10-22
# Copyright: (c) NERSC
# Licence: This file is part of NANSAT. You can redistribute it or modify
# under the terms of GNU General Public License, v.3
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------------------------
import os
import unittest
from mock import patch, PropertyMock, Mock, MagicMock, DEFAULT
import numpy as np
from nansat.pointbrowser import PointBrowser
try:
import matplotlib
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
except ImportError:
MATPLOTLIB_IS_INSTALLED = False
else:
MATPLOTLIB_IS_INSTALLED = True
class PointBrowserTest(unittest.TestCase):
@unittest.skipUnless(MATPLOTLIB_IS_INSTALLED, 'Matplotlib is required')
def setUp(self):
self.data = np.zeros((4, 4))
self.event = MagicMock()
def test_init(self):
""" Create Pointbrowser """
pb = PointBrowser(self.data, force_interactive=False)
self.assertIsInstance(pb.fig, plt.Figure)
self.assertTrue(np.alltrue(pb.data == self.data))
self.assertTrue(np.alltrue(pb.ax.get_images()[0].get_array() == self.data))
self.assertEqual(pb.fmt, 'x-k')
self.assertEqual(pb.points, [])
self.assertEqual(pb.coordinates, [[]])
def test_onclick(self):
""" Mimic click """
self.event = MagicMock()
self.event.xdata = 10
self.event.ydata = 10
self.event.key = None
pb = PointBrowser(self.data, force_interactive=False)
pb.onclick(self.event)
self.assertIsInstance(pb.points[0][0], matplotlib.lines.Line2D)
self.assertEqual(pb.coordinates, [[(self.event.xdata, self.event.ydata)]])
def test_onclick_none(self):
""" Mimic click outside figure """
self.event.xdata = None
self.event.ydata = None
self.event.key = None
pb = PointBrowser(self.data, force_interactive=False)
pb.onclick(self.event)
self.assertEqual(pb.points, [])
self.assertEqual(pb.coordinates, [[]])
def test_onclick_key_z(self):
""" Mimic click with 'z' pressed """
self.event.xdata = 10
self.event.ydata = 10
self.event.key = 'z'
pb = PointBrowser(self.data, force_interactive=False)
pb.onclick(self.event)
self.assertEqual(pb.points, [])
self.assertEqual(pb.coordinates, [[]])
def test_onclick_key(self):
""" Mimic click with 'anykey' pressed """
self.event = MagicMock()
self.event.xdata = 10
self.event.ydata = 10
self.event.key = 'newkey'
pb = PointBrowser(self.data, force_interactive=False)
pb.onclick(self.event)
self.assertIsInstance(pb.points[0][0], matplotlib.lines.Line2D)
self.assertEqual(pb.coordinates, [[],[(self.event.xdata, self.event.ydata)]])
def test_convert_coordinates(self):
""" Mimic click with 'anykey' pressed """
pb = PointBrowser(self.data, force_interactive=False)
pb.coordinates = [[[1,2,3],[4,5,6]]]
new_coordinates = pb._convert_coordinates()
self.assertTrue(np.all(new_coordinates[0] == np.array([[1,2,3], [4,5,6]]).T))
@patch('nansat.pointbrowser.plt')
def test_get_points(self, plt_mock):
plt_mock.show.return_value = None
pb = PointBrowser(self.data, force_interactive=False)
points = pb.get_points()
self.assertTrue(pb.fig.canvas.mpl_connect.called)
self.assertTrue(plt_mock.show.called)
self.assertEqual(points, [])
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
yunfeilu/scikit-learn | examples/manifold/plot_lle_digits.py | 59 | 8576 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
                     discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
nickdex/cosmos | code/artificial_intelligence/src/logistic_regression/logistic_regression.py | 3 | 1825 | # Logistic regression implemented from Scratch in Python
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(scores):
return 1 / (1 + np.exp(-scores))
def log_likelihood(features, target, weights):
scores = np.dot(features, weights)
ll = np.sum(target * scores - np.log(1 + np.exp(scores)))
return ll
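# (Added note) logistic_regression() below performs gradient ascent on the
# log-likelihood above; its gradient with respect to the weights is
# d(ll)/dw = X^T (target - sigmoid(X w)), which is exactly what the update
# inside the loop computes.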
def logistic_regression(
features, target, num_steps, learning_rate, add_intercept=False
):
if add_intercept:
intercept = np.ones((features.shape[0], 1))
features = np.hstack((intercept, features))
weights = np.zeros(features.shape[1])
for step in range(num_steps):
scores = np.dot(features, weights)
predictions = sigmoid(scores)
# Update weights with gradient
output_error_signal = target - predictions
gradient = np.dot(features.T, output_error_signal)
weights += learning_rate * gradient
# Print log-likelihood every so often
if step % 10000 == 0:
print(log_likelihood(features, target, weights))
return weights
np.random.seed(12)
num_observations = 5000
x1 = np.random.multivariate_normal([0, 0], [[1, 0.75], [0.75, 1]], num_observations)
x2 = np.random.multivariate_normal([1, 4], [[1, 0.75], [0.75, 1]], num_observations)
simulated_separableish_features = np.vstack((x1, x2)).astype(np.float32)
simulated_labels = np.hstack((np.zeros(num_observations), np.ones(num_observations)))
plt.figure(figsize=(12, 8))
plt.scatter(
simulated_separableish_features[:, 0],
simulated_separableish_features[:, 1],
c=simulated_labels,
alpha=0.4,
)
plt.show()
# Running the model
weights = logistic_regression(
simulated_separableish_features,
simulated_labels,
num_steps=300000,
learning_rate=5e-5,
add_intercept=True,
)
print(weights)
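# (Added) Illustrative follow-up, reusing the variables defined above: check
# the training accuracy of the fitted weights.
data_with_intercept = np.hstack(
    (np.ones((simulated_separableish_features.shape[0], 1)),
     simulated_separableish_features))
final_scores = np.dot(data_with_intercept, weights)
predictions = np.round(sigmoid(final_scores))
print("Training accuracy: {0:.3f}".format(
    (predictions == simulated_labels).mean()))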
| gpl-3.0 |
riemarc/pyinduct | pyinduct/tests/test_eigenfunctions.py | 3 | 22740 | import unittest
import numpy as np
import pyinduct as pi
import pyinduct.parabolic as parabolic
from pyinduct.tests import show_plots
import matplotlib.pyplot as plt
class TestAddMulFunction(unittest.TestCase):
def test_it(self):
a_mat = np.diag(np.ones(3))
b = np.array(
[pi.AddMulFunction(lambda z: z), pi.AddMulFunction(lambda z: 2 * z), pi.AddMulFunction(lambda z: 3 * z)])
x = np.dot(b, a_mat)
self.assertAlmostEqual([4, 40, 300], [x[0](4), x[1](20), x[2](100)])
class TestSecondOrderEigenfunction(unittest.TestCase):
def test_error_raiser(self):
param = [1, 1, 1, 1, 1]
l = 1
z = pi.Domain((0, l), 2)
n = 10
eig_val, eig_funcs = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, scale=np.ones(n))
eig_freq = pi.SecondOrderDirichletEigenfunction.eigval_tf_eigfreq(
param, eig_val=eig_val)
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, n=n)
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, n=n, scale=np.ones(n))
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, eig_val=eig_val, scale=np.ones(n))
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, eig_freq=eig_freq, scale=np.ones(n))
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, n=n, scale=np.ones(n + 1))
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, eig_val=eig_val, scale=np.ones(n + 1))
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, n=n, eig_freq=eig_freq)
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, eig_val=eig_val, eig_freq=eig_freq)
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, n=n, eig_val=eig_val, eig_freq=eig_freq)
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
pi.Domain((1, 2), 2), param=param, n=n)
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
pi.Domain((0, -2), 2), param=param, n=n)
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
pi.Domain((0, 0), 2), param=param, n=n)
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
(0, 1), param=param, n=n)
class FiniteTransformTest(unittest.TestCase):
def test_trivial(self):
l = 5
k = 5
k1, k2, b = parabolic.control.split_domain(k, 0, l, mode='coprime')[0:3]
a_mat = parabolic.general.get_in_domain_transformation_matrix(
k1, k2, mode="2n")
self.assertAlmostEqual(b, 0)
self.assertTrue(all(np.isclose(a_mat, np.linalg.inv(a_mat)).all(1)))
k1, k2, b = parabolic.control.split_domain(k, l, l, mode='coprime')[0:3]
b_mat = parabolic.general.get_in_domain_transformation_matrix(
k1, k2, mode="2n")
self.assertAlmostEqual(b, l)
self.assertTrue(
all(np.isclose(b_mat, np.diag(np.ones(b_mat.shape[0]))).all(1)))
def test_paper_example(self):
l = 5
k = 5
b_desired = 2
k1, k2, b = parabolic.control.split_domain(k,
b_desired,
l,
mode='coprime')[0:3]
m_mat = np.linalg.inv(
parabolic.general.get_in_domain_transformation_matrix(k1,
k2,
mode="2n"))
shifted_func = pi.FiniteTransformFunction(
np.cos,
m_mat,
l,
nested_lambda=False)
shifted_func_nl = pi.FiniteTransformFunction(
np.cos,
m_mat,
l,
nested_lambda=True)
z = np.linspace(0, l, 1000)
np.testing.assert_array_almost_equal(
shifted_func(z), shifted_func_nl(z))
if show_plots:
plt.figure()
plt.plot(z, shifted_func(z))
plt.plot(z, np.cos(z))
plt.show()
def test_const(self):
n = 5
k = 5
b_desired = 2
l = 5
z = pi.Domain((0, l), 2)
params = [2, 1.5, -3, 1, .5]
k1, k2, b = parabolic.control.split_domain(k,
b_desired,
l,
mode='coprime')[0:3]
M = np.linalg.inv(
parabolic.general.get_in_domain_transformation_matrix(k1,
k2,
mode="2n"))
eig_val, eig_base = pi.SecondOrderRobinEigenfunction.cure_interval(
z, param=params, n=n)
shifted_eig_base = pi.Base(np.array(
[pi.FiniteTransformFunction(
func, M, l, nested_lambda=False)
for func in eig_base]))
shifted_eig_base_nl = pi.Base(np.array(
[pi.FiniteTransformFunction(
func, M, l, nested_lambda=True)
for func in eig_base]))
zz = np.linspace(0, l, 1000)
for f1, f2 in zip(shifted_eig_base, shifted_eig_base_nl):
np.testing.assert_array_almost_equal(f1(zz), f2(zz))
if show_plots:
pi.visualize_functions(eig_base.fractions, 1000)
pi.visualize_functions(shifted_eig_base.fractions, 1000)
def calc_dirichlet_eigenvalues(params):
"""
    Estimate the eigenvalues of a 2nd order Dirichlet problem
    by approximating it with polynomial shape functions.
"""
spat_dom, lag_base = pi.cure_interval(pi.LagrangeNthOrder,
interval=params.domain,
order=3,
node_count=31)
pi.register_base("fem_base", lag_base)
old_params = [params.a2, params.a1, params.a0, -params.alpha0, params.beta0]
weak_form = pi.parabolic.get_parabolic_dirichlet_weak_form("fem_base",
"fem_base",
None,
old_params,
params.domain)
can_form = pi.parse_weak_formulation(weak_form, finalize=True)
ss_form = pi.create_state_space(can_form)
sys_mat = ss_form.A[1]
eig_vals, eig_vecs = np.linalg.eig(sys_mat)
real_idx = np.where(np.imag(eig_vals) == 0)
abs_idx = np.argsort(np.abs(eig_vals[real_idx]))
filtered_vals = eig_vals[real_idx][abs_idx]
print(filtered_vals)
return filtered_vals
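# (Added for reference; not used by the tests below.) For the constant
# coefficient Dirichlet problem a2*x'' + a1*x' + a0*x = lam*x on (0, l), the
# eigenvalues are known in closed form,
#     lam_n = a0 - a1**2/(4*a2) - a2*(n*pi/l)**2,  n = 1, 2, ...,
# which is what the approximation in calc_dirichlet_eigenvalues should
# converge to. `params` is assumed to be a SecondOrderOperator-like object
# with attributes a2, a1, a0 and a `domain` tuple, as used above.
def analytic_dirichlet_eigenvalues(params, count):
    l = params.domain[1] - params.domain[0]
    return np.array([params.a0 - params.a1 ** 2 / (4 * params.a2)
                     - params.a2 * (n * np.pi / l) ** 2
                     for n in range(1, count + 1)])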
class TestSecondOrderEigenVector(unittest.TestCase):
def setUp(self):
self.domain = pi.Domain(bounds=(0, 1), num=100)
self.cnt = 10
self.params_dirichlet = pi.SecondOrderOperator(a2=1,
a1=0,
a0=1,
alpha1=0,
alpha0=1,
beta1=0,
beta0=1,
domain=(0, 1))
if 1:
self.eig_dirichlet = None
self.p_dirichlet = [(1j*n * np.pi, -1j * n * np.pi)
for n in range(1, self.cnt + 1)]
else:
            # TODO make computation by approximation work to check the other two cases
self.eig_dirichlet = \
calc_dirichlet_eigenvalues(self.params_dirichlet)[:self.cnt]
self.p_dirichlet = \
pi.SecondOrderEigenVector.convert_to_characteristic_root(
self.params_dirichlet,
self.eig_dirichlet
)
self.params_neumann = pi.SecondOrderOperator(a2=1,
a1=0,
a0=1,
alpha1=1,
alpha0=0,
beta1=1,
beta0=0)
self.eig_neumann = None
self.p_neumann = None
# self.p_neumann = np.array([0, np.pi, 2 * np.pi, 3 * np.pi],
# dtype=complex)
self.params_robin = pi.Parameters(a2=1,
a1=0,
a0=1,
alpha1=1,
alpha0=2,
beta1=1,
beta0=-2)
self.eig_robin = None
self.p_robin = None
# self.p_robin = np.array([(2.39935728j, -2.39935728j,),
# (5.59677209j, -5.59677209j),
# (8.98681892j, -8.98681892j)])
def test_dirichlet(self):
print("dirichlet case")
self._test_helper(self.params_dirichlet,
self.eig_dirichlet,
self.p_dirichlet)
def test_neumann(self):
print("neumann case")
self._test_helper(self.params_neumann,
self.eig_neumann,
self.p_neumann)
def test_robin(self):
print("robin case")
self._test_helper(self.params_robin,
self.eig_robin,
self.p_robin)
def _test_helper(self, params, l_ref, p_ref):
eig_base = pi.SecondOrderEigenVector.cure_interval(self.domain,
params=params,
count=self.cnt,
derivative_order=2,
debug=False)
char_roots = eig_base.get_attribute("char_pair")
eig_values = pi.SecondOrderEigenVector.convert_to_eigenvalue(params,
char_roots)
# if show_plots:
# pi.visualize_functions(eig_base.fractions)
# test eigenvalues
self.assertEqual(len(eig_values), self.cnt)
if l_ref is not None:
np.testing.assert_array_equal(eig_values, l_ref, verbose=True)
if p_ref is not None:
print(char_roots)
print(p_ref)
np.testing.assert_array_almost_equal(char_roots, p_ref,
decimal=5, verbose=True)
# test eigenvectors
for fraction, lam in zip(eig_base.fractions, eig_values):
# test whether the operator is satisfied
left = (params.a2 * fraction.derive(2)(self.domain.points)
+ params.a1 * fraction.derive(1)(self.domain.points)
+ params.a0 * fraction(self.domain.points))
right = lam * fraction(self.domain.points)
np.testing.assert_array_almost_equal(left, right, verbose=True)
# test whether the bcs are fulfilled
bc1 = (params.alpha0 * fraction(self.domain.bounds[0])
+ params.alpha1 * fraction.derive(1)(self.domain.bounds[0]))
bc2 = (params.beta0 * fraction(self.domain.bounds[1])
+ params.beta1 * fraction.derive(1)(self.domain.bounds[1]))
np.testing.assert_array_almost_equal(bc1, 0, decimal=5)
np.testing.assert_array_almost_equal(bc2, 0, decimal=5)
# check if they are orthonormal
product_mat = pi.calculate_scalar_product_matrix(eig_base, eig_base)
np.testing.assert_array_almost_equal(product_mat,
np.eye(self.cnt))
return eig_base
class TestEigenvalues(unittest.TestCase):
def test_dirichlet(self):
desired_eig_freq = [(i + 1) * np.pi for i in range(4)]
eig_freq, _ = pi.SecondOrderDirichletEigenfunction.eigfreq_eigval_hint(
[1, 2, 3, None, None],
1,
4)
self.assertTrue(all(np.isclose(eig_freq, desired_eig_freq)))
def test_robin(self):
param_desired_ef_pairs = [
([.5, 0, 6, -1, -1], [1.543405j, 2.331122, 5.950173, 9.208434]),
([1, 0, 1, -2, -2], [2.39935728j, 0, 5.59677209, 8.98681892]),
([1, 0, 1, 0, 0], [0j, 3.14159265, 6.28318531, 9.42477796]),
([1, 2, 1, 3, 4], [2.06301691, 4.46395118, 7.18653501, 10.09113552]),
([1, -6, 0, -5, -5], [8.000003j, 1.84683426j, 4.86945051, 8.43284888])]
for param, desired_eig_freq in param_desired_ef_pairs:
eig_freq, _ = pi.SecondOrderRobinEigenfunction.eigfreq_eigval_hint(
param, 1, 4, show_plot=False)
np.testing.assert_array_almost_equal(eig_freq, desired_eig_freq)
class TestSecondOrderEigenvalueProblemFunctions(unittest.TestCase):
def setUp(self):
self.param = [2, 1.5, -3, -5, -.5]
self.z = pi.Domain((0, 1), num=100)
self.n = 10
def evp_eq(self, a2, a1, a0, boundary_check):
for eig_v, eig_f in zip(self.eig_val, self.eig_funcs):
np.testing.assert_array_almost_equal(
(a2 * eig_f.derive(2)(self.z)
+ a1 * eig_f.derive(1)(self.z)
+ a0 * eig_f(self.z)) / eig_v,
eig_v.real * eig_f(self.z) / eig_v,
decimal=4)
boundary_check(eig_v, eig_f, self.z[-1])
@unittest.skip("not implemented")
def test_dirichlet_robin_constant_coefficient(self):
def boundary_check(eig_v, eig_f, l):
np.testing.assert_array_almost_equal(eig_f(0) / eig_v, 0)
np.testing.assert_array_almost_equal(eig_f.derive(1)(l) / eig_v,
-beta * eig_f(l) / eig_v)
a2, a1, a0, _, beta = self.param
param = [a2, a1, a0, None, beta]
eig_freq, self.eig_val \
= pi.SecondOrderDiriRobEigenfunction.eigfreq_eigval_hint(
self.z, param=param, n=self.n, show_plot=True)
_, self.eig_funcs = pi.SecondOrderDiriRobEigenfunction.cure_interval(
self.z, param=param, eig_freq=eig_freq)
[plt.plot(self.z, func(self.z)) for func in self.eig_funcs]
plt.show()
self.evp_eq(a2, a1, a0, boundary_check)
self.spatially_varying_coefficient(boundary_check)
@unittest.skip("not implemented")
def test_robin_dirichlet_constant_coefficient(self):
def boundary_check(eig_v, eig_f, l):
np.testing.assert_array_almost_equal(eig_f.derive(1)(0) / eig_v,
alpha * eig_f(0) / eig_v)
np.testing.assert_array_almost_equal(eig_f(l) / eig_v, 0)
a2, a1, a0, alpha, _ = self.param
param = [a2, a1, a0, alpha, None]
eig_freq, self.eig_val \
= pi.SecondOrderRobDiriEigenfunction.eigfreq_eigval_hint(
self.z, param=param, n=self.n, show_plot=True)
_, self.eig_funcs = pi.SecondOrderRobDiriEigenfunction.cure_interval(
self.z, param=param, eig_freq=eig_freq)
[plt.plot(self.z, func(self.z)) for func in self.eig_funcs]
plt.show()
self.evp_eq(a2, a1, a0, boundary_check)
self.spatially_varying_coefficient(boundary_check)
def test_dirichlet_constant_coefficient(self):
def boundary_check(eig_v, eig_f, l):
np.testing.assert_array_almost_equal(eig_f(0) / eig_v, 0)
np.testing.assert_array_almost_equal(eig_f(l) / eig_v, 0)
a2, a1, a0, _, _ = self.param
param = [a2, a1, a0, None, None]
eig_freq, self.eig_val \
= pi.SecondOrderDirichletEigenfunction.eigfreq_eigval_hint(
param, self.z[-1], self.n)
_, self.eig_funcs = pi.SecondOrderDirichletEigenfunction.cure_interval(
self.z, param=param, eig_freq=eig_freq)
self.evp_eq(a2, a1, a0, boundary_check)
self.spatially_varying_coefficient(boundary_check)
def test_robin_constant_coefficient(self):
def boundary_check(eig_v, eig_f, l):
np.testing.assert_array_almost_equal(eig_f.derive(1)(0) / eig_v,
alpha * eig_f(0) / eig_v)
np.testing.assert_array_almost_equal(eig_f.derive(1)(l) / eig_v,
- beta * eig_f(l) / eig_v)
a2, a1, a0, alpha, beta = self.param
eig_freq, self.eig_val \
= pi.SecondOrderRobinEigenfunction.eigfreq_eigval_hint(
self.param,
self.z[-1],
self.n,
show_plot=show_plots)
_, self.eig_funcs = pi.SecondOrderRobinEigenfunction.cure_interval(
self.z, param=self.param, eig_freq=eig_freq)
self.evp_eq(a2, a1, a0, boundary_check)
self.spatially_varying_coefficient(boundary_check)
if show_plots:
plt.show()
def spatially_varying_coefficient(self, boundary_check):
a2, a1, a0, _, _ = self.param
a2_z = pi.ConstantFunction(a2)
a1_z = pi.ConstantFunction(a1)
a0_z = pi.ConstantFunction(a0)
transformed_eig_funcs = [pi.TransformedSecondOrderEigenfunction(
self.eig_val[i],
[self.eig_funcs[i](0), self.eig_funcs[i].derive(1)(0), 0, 0],
[a2_z, a1_z, a0_z],
self.z)
for i in range(len(self.eig_funcs))]
# TODO: provide second derivative of transformed eigenfunctions
for i in range(len(self.eig_funcs)):
eig_f = transformed_eig_funcs[i]
eig_v = self.eig_val[i]
# interval
np.testing.assert_array_almost_equal(
a2_z(self.z) * self.eig_funcs[i].derive(2)(self.z)
+ a1_z(self.z) * eig_f.derive(1)(self.z)
+ a0_z(self.z) * eig_f(self.z),
eig_v.real * eig_f(self.z),
decimal=2)
boundary_check(eig_v, eig_f, self.z[-1])
class IntermediateTransformationTest(unittest.TestCase):
def test_it(self):
# system/simulation parameters
self.l = 1
self.spatial_domain = (0, self.l)
self.spatial_disc = 30
self.n = 10
# original system parameters
a2 = 1.5
a1 = 2.5
a0 = 28
alpha = -2
beta = -3
self.param = [a2, a1, a0, alpha, beta]
adjoint_param = pi.SecondOrderEigenfunction.get_adjoint_problem(self.param)
# target system parameters (controller parameters)
a1_t = -5
a0_t = -25
alpha_t = 3
beta_t = 2
# a1_t = a1; a0_t = a0; alpha_t = alpha; beta_t = beta
self.param_t = [a2, a1_t, a0_t, alpha_t, beta_t]
# original intermediate ("_i") and target intermediate ("_ti") system parameters
_, _, a0_i, self.alpha_i, self.beta_i = \
parabolic.general.eliminate_advection_term(self.param, self.l)
self.param_i = a2, 0, a0_i, self.alpha_i, self.beta_i
_, _, a0_ti, self.alpha_ti, self.beta_ti = \
parabolic.general.eliminate_advection_term(self.param_t, self.l)
self.param_ti = a2, 0, a0_ti, self.alpha_ti, self.beta_ti
# create (not normalized) eigenfunctions
self.eig_freq, self.eig_val = \
pi.SecondOrderRobinEigenfunction.eigfreq_eigval_hint(self.param,
self.l,
self.n)
init_eig_base = pi.Base([pi.SecondOrderRobinEigenfunction(om,
self.param,
self.spatial_domain[-1])
for om in self.eig_freq])
init_adjoint_eig_funcs = pi.Base([pi.SecondOrderRobinEigenfunction(om,
adjoint_param,
self.spatial_domain[-1])
for om in self.eig_freq])
# normalize eigenfunctions and adjoint eigenfunctions
self.eig_base, self.adjoint_eig_funcs = pi.normalize_base(init_eig_base, init_adjoint_eig_funcs)
# eigenvalues and -frequencies test
eig_freq_i, eig_val_i = pi.SecondOrderRobinEigenfunction.eigfreq_eigval_hint(self.param_i, self.l, self.n)
self.assertTrue(all(np.isclose(self.eig_val, eig_val_i)))
calc_eig_freq = np.sqrt((a0_i - eig_val_i) / a2)
self.assertTrue(all(np.isclose(calc_eig_freq, eig_freq_i)))
# intermediate (_i) eigenfunction test
eig_funcs_i = np.array([pi.SecondOrderRobinEigenfunction(eig_freq_i[i], self.param_i, self.spatial_domain[-1],
self.eig_base.fractions[i](0))
for i in range(self.n)])
self.assertTrue(all(np.isclose([func(0) for func in eig_funcs_i],
[func(0) for func in self.eig_base.fractions])))
test_vec = np.linspace(0, self.l, 100)
for i in range(self.n):
self.assertTrue(all(np.isclose(self.eig_base.fractions[i](test_vec),
eig_funcs_i[i](test_vec) * np.exp(-a1 / 2 / a2 * test_vec))))
| gpl-3.0 |
DSLituiev/scikit-learn | examples/svm/plot_weighted_samples.py | 95 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each
point is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
aselle/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 39 | 32726 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.deprecation import deprecated
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([(k,
out_el_shape(v, n_classes[k]
if n_classes is not None and
k in n_classes else None))
for k, v in list(y_shape.items())])
return input_shape, output_shape, batch_size
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
    x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
      supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be None or same type as y. In case `y`
      is `dict` (or an iterable which returns dicts), `n_classes` must be a
      `dict` such that `n_classes[key]` is the number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
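# (Added) Minimal usage sketch, not part of the original module. It only uses
# `setup_train_data_feeder` and `DataFeeder.input_builder` as defined in this
# file, with made-up array shapes; defined as a helper and never called here.
def _example_setup_train_data_feeder():
  """Builds a shuffled mini-batch feeder from small in-memory numpy arrays."""
  x = np.random.rand(32, 3).astype(np.float32)
  y = np.random.randint(0, 2, size=32)
  feeder = setup_train_data_feeder(x, y, n_classes=2, batch_size=8)
  return feeder.input_builder()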
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
@deprecated(None, 'Please convert numpy dtypes explicitly.')
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: One feature sample which can either Nd numpy matrix of shape
`[n_samples, n_features, ...]` or dictionary of Nd numpy matrix.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
input_dtype: DType of input (or dictionary of dtypes).
output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(
x, dict), y is not None and isinstance(y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else (dict(
[(k, check_array(v, v.dtype)) for k, v in list(y.items())])
if y_is_dict else check_array(y, y.dtype))
# self.n_classes is not None means we're converting raw target indices
# to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (
np.int64 if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
if x_is_dict else _check_dtype(self._x.dtype))
# self._output_dtype == np.float32 when y is None
self._output_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
if y_is_dict else (_check_dtype(self._y.dtype)
if y is not None else np.float32))
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
if x_is_dict:
num_samples = list(self._x.values())[0].shape[0]
elif tensor_util.is_tensor(self._x):
num_samples = self._x.shape[
0].value # shape will be a Dimension, extract an int
else:
num_samples = self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1))
if len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(
self._x.values())[0].shape[0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else {
self._input_placeholder.name:
extract(self._x, batch_indices)
})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = (self.output_shape, self._output_dtype,
self.n_classes)
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Streaming data feeder allows reading data as it comes in from disk or
somewhere else. It is common to have these iterators rotate infinitely over
the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator, each element of which returns one feature sample. A sample can
be an Nd numpy matrix or a dictionary of Nd numpy matrices.
y: iterator, each element of which returns one label sample. A sample can be
an Nd numpy matrix or a dictionary of Nd numpy matrices with one or many
class / regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
batch_size: Mini batch size to accumulate samples in one batch. If set to
`None`, the iterator is assumed to return already batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = (
[1] + list(y_first_el[0].shape
if isinstance(y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict(
[(k, np.zeros(shape[k], dtype[k])) for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Numpy arrays can be serialized to disk, and it is possible to do random seeks
into them. DaskDataFeeder removes the requirement to keep the full dataset in
memory and still allows random seeks for sampling of batches.
"""
@deprecated(None, 'Please feed input to tf.data to support dask.')
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
x: iterator that, for each element, returns features.
y: iterator that, for each element, returns one or many classes /
regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate, so use an
int value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.placeholder for input features mini batch.
output_placeholder: tf.placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
| apache-2.0 |
ClimbsRocks/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
ddervs/bad-boids | boids/tests/test_boids.py | 1 | 2868 | import os
import yaml
from mock import patch
from numpy.testing import assert_array_almost_equal
from matplotlib import animation
import boids.boids.boids as boids
from nose.tools import assert_raises
config_filename = 'boids/config.yaml'
config = yaml.load(open(config_filename))
def test_boids_fixtures():
regression_data = yaml.load(open(os.path.join(os.path.dirname(__file__), 'fixture.yml')))
boid_data = regression_data["reg_before"]
test_boids = boids.Boids(boid_data, config)
# Regression test
test_boids.update_boids()
for after, before in zip(regression_data["reg_after"], test_boids.boids):
assert_array_almost_equal(after, before, 2)
# Test sub_functions
test_boids.fly_to_middle()
for after, before in zip(regression_data["fly_to_middle"], test_boids.boids):
assert_array_almost_equal(after, before, 2)
test_boids.fly_away_nearby()
for after, before in zip(regression_data["fly_away_nearby"], test_boids.boids):
assert_array_almost_equal(after, before, 2)
test_boids.match_speed()
for after, before in zip(regression_data["match_speed"], test_boids.boids):
assert_array_almost_equal(after, before, 2)
test_boids.move_boids()
for after, before in zip(regression_data["move_boids"], test_boids.boids):
assert_array_almost_equal(after, before, 2)
def test_new_flock():
num_boids = 20
boids_range = [-100, 100]
test_boids = boids.new_flock(num_boids, boids_range, boids_range, boids_range, boids_range)
for array in test_boids:
# Check right number of boids
assert array.shape == (num_boids, 2)
# Check boids positions and velocities in range
in_range = ((boids_range[0] < array) & (array < boids_range[1])).all()
assert in_range
@patch.object(boids.Boids, 'update_boids')
def test_animate(mock_update_boids):
regression_data = yaml.load(open(os.path.join(os.path.dirname(__file__), 'fixture.yml')))
boid_data = regression_data["reg_before"]
test_boids = boids.Boids(boid_data, config)
# Test that animation calls update_boids method
frame = None
test_boids.animate(frame)
assert mock_update_boids.called
@patch.object(animation, 'FuncAnimation')
def test_run_animation(mock_FuncAnimation):
regression_data = yaml.load(open(os.path.join(os.path.dirname(__file__), 'fixture.yml')))
boid_data = regression_data["reg_before"]
test_boids = boids.Boids(boid_data, config)
# Test that run_animation calls FuncAnimation
test_boids.run_animation()
assert mock_FuncAnimation.called
def test_init():
# Test that appropriate exceptions raised if incorrect types passed
some_list = [1, 2, 3]
assert_raises(TypeError, boids.Boids, some_list, dict(key=some_list))
assert_raises(TypeError, boids.Boids, (some_list, some_list), some_list)
| mit |
chenyyx/scikit-learn-doc-zh | examples/en/cross_decomposition/plot_compare_cross_decomposition.py | 55 | 4953 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
# #############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
# #############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.scatter(X_train_r[:, 0], Y_train_r[:, 0], label="train",
marker="o", c="b", s=25)
plt.scatter(X_test_r[:, 0], Y_test_r[:, 0], label="test",
marker="o", c="r", s=25)
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.scatter(X_train_r[:, 1], Y_train_r[:, 1], label="train",
marker="o", c="b", s=25)
plt.scatter(X_test_r[:, 1], Y_test_r[:, 1], label="test",
marker="o", c="r", s=25)
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.scatter(X_train_r[:, 0], X_train_r[:, 1], label="train",
marker="*", c="b", s=50)
plt.scatter(X_test_r[:, 0], X_test_r[:, 1], label="test",
marker="*", c="r", s=50)
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.scatter(Y_train_r[:, 0], Y_train_r[:, 1], label="train",
marker="*", c="b", s=50)
plt.scatter(Y_test_r[:, 0], Y_test_r[:, 1], label="test",
marker="*", c="r", s=50)
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
# #############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
# #############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| gpl-3.0 |
squall1988/lquant | backtest/finance/risk/cumulative.py | 1 | 17396 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
import math
import numpy as np
import backtest.utils.math_utils as zp_math
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import iteritems
from . risk import (
alpha,
check_entry,
choose_treasury,
downside_risk,
sharpe_ratio,
sortino_ratio,
)
from __init__ import VERSION_LABEL
log = logbook.Logger('Risk Cumulative')
choose_treasury = functools.partial(choose_treasury, lambda *args: '10year',
compound=False)
def information_ratio(algo_volatility, algorithm_return, benchmark_return):
"""
http://en.wikipedia.org/wiki/Information_ratio
Args:
algo_volatility (float): Annualized volatility of the algorithm's returns.
algorithm_return (float): Annualized mean return of the algorithm.
benchmark_return (float): Annualized mean return of the benchmark.
Returns:
float. Information ratio.
"""
if zp_math.tolerant_equals(algo_volatility, 0):
return np.nan
# The square of the annualization factor is in the volatility,
# because the volatility is also annualized,
# i.e. the sqrt(annual factor) is in the volatility's numerator.
# So to have the correct annualization factor for the
# Sharpe value's numerator, which should be the sqrt(annual factor).
# The square of the sqrt of the annual factor, i.e. the annual factor
# itself, is needed in the numerator to factor out the division by
# its square root.
return (algorithm_return - benchmark_return) / algo_volatility
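# Worked example (hypothetical numbers): with an annualized algorithm return of
# 0.12, a benchmark return of 0.08 and an algorithm volatility of 0.10,
# information_ratio(0.10, 0.12, 0.08) returns (0.12 - 0.08) / 0.10, i.e. 0.4.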
class RiskMetricsCumulative(object):
"""
:Usage:
Instantiate RiskMetricsCumulative once.
Call update() method on each dt to update the metrics.
"""
METRIC_NAMES = (
'alpha',
'beta',
'sharpe',
'algorithm_volatility',
'benchmark_volatility',
'downside_risk',
'sortino',
'information',
)
def __init__(self, sim_params, env,
create_first_day_stats=False):
self.treasury_curves = env.treasury_curves
self.start_date = sim_params.period_start.replace(
hour=0, minute=0, second=0, microsecond=0
)
self.end_date = sim_params.period_end.replace(
hour=0, minute=0, second=0, microsecond=0
)
self.trading_days = env.days_in_range(self.start_date, self.end_date)
# Hold on to the trading day before the start,
# used for index of the zero return value when forcing returns
# on the first day.
self.day_before_start = self.start_date - env.trading_days.freq
last_day = normalize_date(sim_params.period_end)
if last_day not in self.trading_days:
last_day = pd.tseries.index.DatetimeIndex(
[last_day]
)
self.trading_days = self.trading_days.append(last_day)
self.sim_params = sim_params
self.env = env
self.create_first_day_stats = create_first_day_stats
cont_index = self.trading_days
self.cont_index = cont_index
self.cont_len = len(self.cont_index)
empty_cont = np.full(self.cont_len, np.nan)
self.algorithm_returns_cont = empty_cont.copy()
self.benchmark_returns_cont = empty_cont.copy()
self.algorithm_cumulative_leverages_cont = empty_cont.copy()
self.mean_returns_cont = empty_cont.copy()
self.annualized_mean_returns_cont = empty_cont.copy()
self.mean_benchmark_returns_cont = empty_cont.copy()
self.annualized_mean_benchmark_returns_cont = empty_cont.copy()
# The returns at a given time are read and reset from the respective
# returns container.
self.algorithm_returns = None
self.benchmark_returns = None
self.mean_returns = None
self.annualized_mean_returns = None
self.mean_benchmark_returns = None
self.annualized_mean_benchmark_returns = None
self.algorithm_cumulative_returns = empty_cont.copy()
self.benchmark_cumulative_returns = empty_cont.copy()
self.algorithm_cumulative_leverages = empty_cont.copy()
self.excess_returns = empty_cont.copy()
self.latest_dt_loc = 0
self.latest_dt = cont_index[0]
self.benchmark_volatility = empty_cont.copy()
self.algorithm_volatility = empty_cont.copy()
self.beta = empty_cont.copy()
self.alpha = empty_cont.copy()
self.sharpe = empty_cont.copy()
self.downside_risk = empty_cont.copy()
self.sortino = empty_cont.copy()
self.information = empty_cont.copy()
self.drawdowns = empty_cont.copy()
self.max_drawdowns = empty_cont.copy()
self.max_drawdown = 0
self.max_leverages = empty_cont.copy()
self.max_leverage = 0
self.current_max = -np.inf
self.daily_treasury = pd.Series(index=self.trading_days)
self.treasury_period_return = np.nan
self.num_trading_days = 0
def update(self, dt, algorithm_returns, benchmark_returns, leverage):
# Keep track of latest dt for use in to_dict and other methods
# that report current state.
self.latest_dt = dt
dt_loc = self.cont_index.get_loc(dt)
self.latest_dt_loc = dt_loc
self.algorithm_returns_cont[dt_loc] = algorithm_returns
self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]
self.num_trading_days = len(self.algorithm_returns)
if self.create_first_day_stats:
if len(self.algorithm_returns) == 1:
self.algorithm_returns = np.append(0.0, self.algorithm_returns)
self.algorithm_cumulative_returns[dt_loc] = \
self.calculate_cumulative_returns(self.algorithm_returns)
algo_cumulative_returns_to_date = \
self.algorithm_cumulative_returns[:dt_loc + 1]
self.mean_returns_cont[dt_loc] = \
algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days
self.mean_returns = self.mean_returns_cont[:dt_loc + 1]
self.annualized_mean_returns_cont[dt_loc] = \
self.mean_returns_cont[dt_loc] * 252
self.annualized_mean_returns = \
self.annualized_mean_returns_cont[:dt_loc + 1]
if self.create_first_day_stats:
if len(self.mean_returns) == 1:
self.mean_returns = np.append(0.0, self.mean_returns)
self.annualized_mean_returns = np.append(
0.0, self.annualized_mean_returns)
self.benchmark_returns_cont[dt_loc] = benchmark_returns
self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]
if self.create_first_day_stats:
if len(self.benchmark_returns) == 1:
self.benchmark_returns = np.append(0.0, self.benchmark_returns)
self.benchmark_cumulative_returns[dt_loc] = \
self.calculate_cumulative_returns(self.benchmark_returns)
benchmark_cumulative_returns_to_date = \
self.benchmark_cumulative_returns[:dt_loc + 1]
self.mean_benchmark_returns_cont[dt_loc] = \
benchmark_cumulative_returns_to_date[dt_loc] / \
self.num_trading_days
self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt_loc + 1]
self.annualized_mean_benchmark_returns_cont[dt_loc] = \
self.mean_benchmark_returns_cont[dt_loc] * 252
self.annualized_mean_benchmark_returns = \
self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]
self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
self.algorithm_cumulative_leverages = \
self.algorithm_cumulative_leverages_cont[:dt_loc + 1]
if self.create_first_day_stats:
if len(self.algorithm_cumulative_leverages) == 1:
self.algorithm_cumulative_leverages = np.append(
0.0,
self.algorithm_cumulative_leverages)
if not len(self.algorithm_returns) and len(self.benchmark_returns):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_date,
end=self.end_date,
dt=dt
)
raise Exception(message)
self.update_current_max()
self.benchmark_volatility[dt_loc] = \
self.calculate_volatility(self.benchmark_returns)
self.algorithm_volatility[dt_loc] = \
self.calculate_volatility(self.algorithm_returns)
# caching the treasury rates for the minutely case is a
# big speedup, because it avoids searching the treasury
# curves on every minute.
# In both minutely and daily, the daily curve is always used.
treasury_end = dt.replace(hour=0, minute=0)
if np.isnan(self.daily_treasury[treasury_end]):
treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_date,
treasury_end,
self.env,
)
self.daily_treasury[treasury_end] = treasury_period_return
self.treasury_period_return = self.daily_treasury[treasury_end]
self.excess_returns[dt_loc] = (
self.algorithm_cumulative_returns[dt_loc] -
self.treasury_period_return)
self.beta[dt_loc] = self.calculate_beta()
self.alpha[dt_loc] = self.calculate_alpha()
self.sharpe[dt_loc] = self.calculate_sharpe()
self.downside_risk[dt_loc] = \
self.calculate_downside_risk()
self.sortino[dt_loc] = self.calculate_sortino()
self.information[dt_loc] = self.calculate_information()
self.max_drawdown = self.calculate_max_drawdown()
self.max_drawdowns[dt_loc] = self.max_drawdown
self.max_leverage = self.calculate_max_leverage()
self.max_leverages[dt_loc] = self.max_leverage
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
Returns a dict object of the current cumulative risk metrics.
"""
dt = self.latest_dt
dt_loc = self.latest_dt_loc
period_label = dt.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility':
self.benchmark_volatility[dt_loc],
'algo_volatility':
self.algorithm_volatility[dt_loc],
'treasury_period_return': self.treasury_period_return,
# Though the two following keys say period return,
# they would be more accurately called the cumulative return.
# However, the keys need to stay the same, for now, for backwards
# compatibility with existing consumers.
'algorithm_period_return':
self.algorithm_cumulative_returns[dt_loc],
'benchmark_period_return':
self.benchmark_cumulative_returns[dt_loc],
'beta': self.beta[dt_loc],
'alpha': self.alpha[dt_loc],
'sharpe': self.sharpe[dt_loc],
'sortino': self.sortino[dt_loc],
'information': self.information[dt_loc],
'excess_return': self.excess_returns[dt_loc],
'max_drawdown': self.max_drawdown,
'max_leverage': self.max_leverage,
'period_label': period_label
}
return {k: (None if check_entry(k, v) else v)
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
for metric in self.METRIC_NAMES:
value = getattr(self, metric)[-1]
if isinstance(value, list):
if len(value) == 0:
value = np.nan
else:
value = value[-1]
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def calculate_cumulative_returns(self, returns):
return (1. + returns).prod() - 1
def update_current_max(self):
if len(self.algorithm_cumulative_returns) == 0:
return
current_cumulative_return = \
self.algorithm_cumulative_returns[self.latest_dt_loc]
if self.current_max < current_cumulative_return:
self.current_max = current_cumulative_return
def calculate_max_drawdown(self):
if len(self.algorithm_cumulative_returns) == 0:
return self.max_drawdown
# The drawdown is defined as: (high - low) / high
# The above factors out to: 1.0 - (low / high)
#
# Instead of explicitly always using the low, use the current total
# return value, and test that against the max drawdown, which will
# exceed the previous max_drawdown iff the current return is lower than
# the previous low in the current drawdown window.
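# Worked example (hypothetical numbers): a cumulative return that peaks at +20%
# and then falls to +8% gives a drawdown of 1.0 - (1.08 / 1.20) = 0.1.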
cur_drawdown = 1.0 - (
(1.0 + self.algorithm_cumulative_returns[self.latest_dt_loc])
/
(1.0 + self.current_max))
self.drawdowns[self.latest_dt_loc] = cur_drawdown
if self.max_drawdown < cur_drawdown:
return cur_drawdown
else:
return self.max_drawdown
def calculate_max_leverage(self):
# The leverage is defined as: the gross_exposure/net_liquidation
# gross_exposure = long_exposure + abs(short_exposure)
# net_liquidation = ending_cash + long_exposure + short_exposure
cur_leverage = self.algorithm_cumulative_leverages_cont[
self.latest_dt_loc]
return max(cur_leverage, self.max_leverage)
def calculate_sharpe(self):
"""
http://en.wikipedia.org/wiki/Sharpe_ratio
"""
return sharpe_ratio(
self.algorithm_volatility[self.latest_dt_loc],
self.annualized_mean_returns_cont[self.latest_dt_loc],
self.daily_treasury[self.latest_dt.date()])
def calculate_sortino(self):
"""
http://en.wikipedia.org/wiki/Sortino_ratio
"""
return sortino_ratio(
self.annualized_mean_returns_cont[self.latest_dt_loc],
self.daily_treasury[self.latest_dt.date()],
self.downside_risk[self.latest_dt_loc])
def calculate_information(self):
"""
http://en.wikipedia.org/wiki/Information_ratio
"""
return information_ratio(
self.algorithm_volatility[self.latest_dt_loc],
self.annualized_mean_returns_cont[self.latest_dt_loc],
self.annualized_mean_benchmark_returns_cont[self.latest_dt_loc])
def calculate_alpha(self):
"""
http://en.wikipedia.org/wiki/Alpha_(investment)
"""
return alpha(
self.annualized_mean_returns_cont[self.latest_dt_loc],
self.treasury_period_return,
self.annualized_mean_benchmark_returns_cont[self.latest_dt_loc],
self.beta[self.latest_dt_loc])
def calculate_volatility(self, daily_returns):
if len(daily_returns) <= 1:
return 0.0
return np.std(daily_returns, ddof=1) * math.sqrt(252)
def calculate_downside_risk(self):
return downside_risk(self.algorithm_returns,
self.mean_returns,
252)
def calculate_beta(self):
"""
.. math::
\\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}
http://en.wikipedia.org/wiki/Beta_(finance)
"""
# it doesn't make much sense to calculate beta for less than two
# values, so return none.
if len(self.algorithm_returns) < 2:
return 0.0
returns_matrix = np.vstack([self.algorithm_returns,
self.benchmark_returns])
C = np.cov(returns_matrix, ddof=1)
algorithm_covariance = C[0][1]
benchmark_variance = C[1][1]
beta = algorithm_covariance / benchmark_variance
return beta
def __getstate__(self):
state_dict = {k: v for k, v in iteritems(self.__dict__)
if not k.startswith('_')}
STATE_VERSION = 3
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 3
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("RiskMetricsCumulative \
saved state is too old.")
self.__dict__.update(state)
| bsd-2-clause |
rsignell-usgs/notebook | system-test/Theme_2_Extreme_Events/Scenario_2A/ModelDataCompare_Waves/Model_Obs_Compare_Waves.py | 3 | 12427 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# ># IOOS System Test: [Extreme Events Theme:](https://github.com/ioos/system-test/wiki/Development-of-Test-Themes#theme-2-extreme-events) Coastal Inundation
# <markdowncell>
# ### Can we compare observed and modeled wave parameters?
# This notebook is based on [IOOS System Test: Inundation](http://nbviewer.ipython.org/github/ioos/system-test/blob/master/Theme_2_Extreme_Events/Scenario_2A_Coastal_Inundation/Scenario_2A_Water_Level_Signell.ipynb)
#
# Methodology:
# * Define temporal and spatial bounds of interest, as well as parameters of interest
# * Search for available service endpoints in the NGDC CSW catalog meeting search criteria
# * Extract OPeNDAP data endpoints from model datasets and SOS endpoints from observational datasets
# * Obtain observation data sets from stations within the spatial boundaries
# * Plot observation stations on a map (red marker if not enough data)
# * Using DAP (model) endpoints find all available models data sets that fall in the area of interest, for the specified time range, and extract a model grid cell closest to all the given station locations
# * Plot modelled and observed time series wave data on same axes for comparison
#
# <headingcell level=4>
# import required libraries
# <codecell>
import datetime as dt
from warnings import warn
from io import BytesIO
import folium
import netCDF4
from IPython.display import HTML
import iris
from iris.exceptions import CoordinateNotFoundError, ConstraintMismatchError
import matplotlib.pyplot as plt
from owslib.csw import CatalogueServiceWeb
from owslib import fes
import pandas as pd
from pyoos.collectors.ndbc.ndbc_sos import NdbcSos
import requests
from utilities import (fes_date_filter, collector2df, find_timevar, find_ij, nearxy, service_urls, mod_df,
get_coordinates, get_station_longName, inline_map)
# <headingcell level=4>
# Specify Temporal and Spatial conditions
# <codecell>
bounding_box_type = "box"
# Bounding Box [lon_min, lat_min, lon_max, lat_max]
area = {'Hawaii': [-160.0, 18.0, -154., 23.0],
'Gulf of Maine': [-72.0, 41.0, -69.0, 43.0],
'New York harbor region': [-75., 39., -71., 41.5],
'Puerto Rico': [-75, 12, -55, 26],
'East Coast': [-77, 34, -70, 40],
'North West': [-130, 38, -121, 50],
'Gulf of Mexico': [-92, 28, -84, 31],
'Arctic': [-179, 63, -140, 80],
'North East': [-74, 40, -69, 42],
'Virginia Beach': [-76, 34, -74, 38]}
bounding_box = area['East Coast']
#temporal range - May 1 2014 - May 10 2014
start_date = dt.datetime(2014,5,1,0,50).strftime('%Y-%m-%d %H:%M')
end_date = dt.datetime(2014,5,10).strftime('%Y-%m-%d %H:00')
time_date_range = [start_date,end_date] #start_date_end_date
jd_start = dt.datetime.strptime(start_date, '%Y-%m-%d %H:%M')
jd_stop = dt.datetime.strptime(end_date, '%Y-%m-%d %H:%M')
print start_date,'to',end_date
# <headingcell level=4>
# Specify data names of interest
# <codecell>
#put the names in a dict for ease of access
# put the names in a dict for ease of access
sos_name = 'waves'
data_dict["waves"] = {"names":['sea_surface_wave_significant_height',
'significant_wave_height',
'significant_height_of_wave',
'sea_surface_wave_significant_height(m)',
'sea_surface_wave_significant_height (m)',
'water_surface_height'],
"sos_name":["waves"]}
# <headingcell level=3>
# Search CSW for datasets of interest
# <codecell>
endpoint = 'http://www.ngdc.noaa.gov/geoportal/csw' # NGDC Geoportal
csw = CatalogueServiceWeb(endpoint,timeout=60)
# <codecell>
# convert User Input into FES filters
start,stop = fes_date_filter(start_date,end_date)
bbox = fes.BBox(bounding_box)
#use the search name to create search filter
or_filt = fes.Or([fes.PropertyIsLike(propertyname='apiso:AnyText',literal='*%s*' % val,
escapeChar='\\',wildCard='*',singleChar='?') for val in data_dict["waves"]["names"]])
# try request using multiple filters "and" syntax: [[filter1,filter2]]
filter_list = [fes.And([ bbox, start, stop, or_filt]) ]
csw.getrecords2(constraints=filter_list,maxrecords=1000,esn='full')
print str(len(csw.records)) + " csw records found"
# <markdowncell>
# #### Dap URLs
# <codecell>
dap_urls = service_urls(csw.records)
#remove duplicates and organize
dap_urls = sorted(set(dap_urls))
print "Total DAP:",len(dap_urls)
#print the first 5...
print "\n".join(dap_urls[0:5])
# <markdowncell>
# #### SOS URLs
# <codecell>
sos_urls = service_urls(csw.records,service='sos:url')
#remove duplicates and organize
sos_urls = sorted(set(sos_urls))
print "Total SOS:",len(sos_urls)
print "\n".join(sos_urls)
# <markdowncell>
# ### Get most recent observations from all stations in bounding box
# <codecell>
start_time = dt.datetime.strptime(start_date,'%Y-%m-%d %H:%M')
end_time = dt.datetime.strptime(end_date,'%Y-%m-%d %H:%M')
iso_start = start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
iso_end = end_time.strftime('%Y-%m-%dT%H:%M:%SZ')
# Define the SOS collector
collector = NdbcSos()
print collector.server.identification.title
collector.variables = data_dict["waves"]["sos_name"]
collector.server.identification.title
# Don't specify start and end date in the filter and the most recent observation will be returned
collector.filter(bbox=bounding_box,
variables=data_dict["waves"]["sos_name"])
response = collector.raw(responseFormat="text/csv")
obs_loc_df = pd.read_csv(BytesIO(response.encode('utf-8')),
parse_dates=True,
index_col='date_time')
# Now let's specify start and end times
collector.start_time = start_time
collector.end_time = end_time
ofrs = collector.server.offerings
# <codecell>
obs_loc_df.head()
# <codecell>
stations = [sta.split(':')[-1] for sta in obs_loc_df['station_id']]
obs_lon = [sta for sta in obs_loc_df['longitude (degree)']]
obs_lat = [sta for sta in obs_loc_df['latitude (degree)']]
# <headingcell level=3>
# Request CSV response from SOS and convert to Pandas DataFrames
# <codecell>
ts_rng = pd.date_range(start=start_date, end=end_date)
ts = pd.DataFrame(index=ts_rng)
# Save all of the observation data into a list of dataframes
obs_df = []
# Create a list of dataframes for just wave heights for comparing with modeled wave heights later
Hs_obs_df = []
for sta in stations:
raw_df = collector2df(collector, sta, sos_name)
obs_df.append(pd.DataFrame(pd.concat([raw_df, ts],axis=1)))
obs_df[-1].name = raw_df.name
if raw_df.empty:
Hs_obs_df.append(pd.DataFrame())
else:
Hs_obs_df.append(pd.DataFrame(pd.concat([raw_df, ts],axis=1)['sea_surface_wave_significant_height (m)']))
Hs_obs_df[-1].name = raw_df.name
# <markdowncell>
# ### Plot the Observation Stations on Map
# <codecell>
min_data_pts = 20
# Find center of bounding box
lat_center = abs(bounding_box[3]-bounding_box[1])/2 + bounding_box[1]
lon_center = abs(bounding_box[0]-bounding_box[2])/2 + bounding_box[0]
m = folium.Map(location=[lat_center, lon_center], zoom_start=6)
n = 0
for df in obs_df:
#get the station data from the sos end point
longname = df.name
lat = obs_loc_df['latitude (degree)'][n]
lon = obs_loc_df['longitude (degree)'][n]
popup_string = ('<b>Station:</b><br>'+ longname)
if len(df) > min_data_pts:
m.simple_marker([lat, lon], popup=popup_string)
else:
#popup_string += '<br>No Data Available'
popup_string += '<br>Not enough data available<br>requested pts: ' + str(min_data_pts ) + '<br>Available pts: ' + str(len(Hs_obs_df[n]))
m.circle_marker([lat, lon], popup=popup_string, fill_color='#ff0000', radius=10000, line_color='#ff0000')
n += 1
m.line(get_coordinates(bounding_box,bounding_box_type), line_color='#FF0000', line_weight=5)
inline_map(m)
# <codecell>
### Plot Hs and Tp for each station
# <codecell>
for df in obs_df:
if len(df) > min_data_pts:
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20,5))
df['sea_surface_wave_significant_height (m)'].plot(ax=axes[0], color='r')
axes[0].set_title(df.name)
axes[0].set_ylabel('Hs (m)')
df['sea_surface_wave_peak_period (s)'].plot(ax=axes[1])
axes[1].set_title(df.name)
axes[1].set_ylabel('Tp (s)')
# <markdowncell>
# ### Get model output from OPeNDAP URLs
# Try to open all the OPeNDAP URLs using Iris from the British Met Office. If we can open a URL in Iris, we know it's a model result.
# <codecell>
name_in_list = lambda cube: cube.standard_name in data_dict['waves']['names']
constraint = iris.Constraint(cube_func=name_in_list)
# <codecell>
# Use only data within 0.04 degrees (about 4 km).
max_dist = 0.04
# Use only data where the standard deviation of the time series exceeds 0.01 m (1 cm).
# This eliminates flat line model time series that come from land points that should have had missing values.
min_var = 0.01
for url in dap_urls:
if 'cdip' in url:
# The CDIP buoys are known to be observed data, so let's just skip
continue
try:
a = iris.load_cube(url, constraint)
# take first 20 chars for model name
mod_name = a.attributes['title'][0:20]
r = a.shape
timevar = find_timevar(a)
lat = a.coord(axis='Y').points
lon = a.coord(axis='X').points
jd = timevar.units.num2date(timevar.points)
start = timevar.units.date2num(jd_start)
istart = timevar.nearest_neighbour_index(start)
stop = timevar.units.date2num(jd_stop)
istop = timevar.nearest_neighbour_index(stop)
# Only proceed if we have data in the range requested.
if istart != istop:
nsta = len(stations)
if len(r) == 3:
print('[Structured grid model]:', url)
d = a[0, :, :].data
# Find the closest non-land point from a structured grid model.
if len(lon.shape) == 1:
lon, lat = np.meshgrid(lon, lat)
j, i, dd = find_ij(lon, lat, d, obs_lon, obs_lat)
for n in range(nsta):
# Only use if model cell is within max_dist degrees of the requested
# location.
if dd[n] <= max_dist:
arr = a[istart:istop, j[n], i[n]].data
if arr.std() >= min_var:
c = mod_df(arr, timevar, istart, istop,
mod_name, ts)
name = Hs_obs_df[n].name
Hs_obs_df[n] = pd.concat([Hs_obs_df[n], c], axis=1)
Hs_obs_df[n].name = name
elif len(r) == 2:
print('[Unstructured grid model]:', url)
# Find the closest point from an unstructured grid model.
index, dd = nearxy(lon.flatten(), lat.flatten(),
obs_lon, obs_lat)
for n in range(nsta):
# Only use if model cell is within max_dist degrees of the requested
# location.
if dd[n] <= max_dist:
arr = a[istart:istop, index[n]].data
if arr.std() >= min_var:
c = mod_df(arr, timevar, istart, istop,
mod_name, ts)
name = Hs_obs_df[n].name
Hs_obs_df[n] = pd.concat([Hs_obs_df[n], c], axis=1)
Hs_obs_df[n].name = name
elif len(r) == 1:
print('[Data]:', url)
except (ValueError, RuntimeError, CoordinateNotFoundError,
ConstraintMismatchError) as e:
warn("\n%s\n" % e)
pass
# <markdowncell>
# ### Plot Modeled vs Obs Wave Height
# <codecell>
for df in Hs_obs_df:
# Make sure there is obs data at the station for comparison
if 'sea_surface_wave_significant_height (m)' in df.columns:
ax = df.plot(figsize=(14, 6), title=df.name, legend=False)
plt.setp(ax.lines[0], linewidth=4.0, color='0.7', zorder=1, marker='.')
ax.legend()
ax.set_ylabel('m')
| mit |
ilo10/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 205 | 10378 | # Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
#Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
rexshihaoren/scikit-learn | sklearn/pipeline.py | 162 | 21103 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
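# Worked example (editor's sketch, not part of the original module): duplicate
# estimator types receive reverse-numbered suffixes, so
#   _name_estimators([PCA(), PCA(), GaussianNB()])
# yields [('pca-1', ...), ('pca-2', ...), ('gaussiannb', ...)], which is how
# make_pipeline/make_union below derive unique step names automatically.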
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
    # if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
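# Usage sketch (editor's addition, mirroring the doctest above): the union
# concatenates transformer outputs column-wise, so for an input X of shape
# (n_samples, n_features) the result has sum_n_components columns, e.g.
#   union = make_union(PCA(n_components=2), TruncatedSVD(n_components=2))
#   X_t = union.fit_transform(X)   # shape (n_samples, 4): 2 PCA + 2 SVD columns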
| bsd-3-clause |
polyanskiy/refractiveindex.info-scripts | scripts/Adachi 1989 - InGaAs.py | 1 | 3831 | # -*- coding: utf-8 -*-
# Author: Mikhail Polyanskiy
# Last modified: 2017-07-08
# Original data: Adachi 1989, https://doi.org/10.1063/1.343580
# In(1-x)Ga(x)As; x=0.48
import numpy as np
import matplotlib.pyplot as plt
π = np.pi
# model parameters
E0 = 0.75 #eV
Δ0 = 1.04-E0 #eV
E1 = 2.57 #eV
Δ1 = 2.83-E1 #eV
E2 = 4.41 #eV
Eg = 1.20 #eV
A = 1.20 #eV**1.5
B1 = 3.84
B2 = 1.48
B11 = 7.57 #eV**-0.5
B21 = 2.96 #eV**-0.5
Γ = 0.14 #eV
C = 2.90
γ = 0.225
D = 20.7
εinf = 2.8
def H(x): # Heaviside step function
return 0.5 * (np.sign(x) + 1)
def Epsilon_A(ħω): #E0
χ0 = ħω/E0
χso = ħω / (E0+Δ0)
H0 = H(1-χ0)
Hso = H(1-χso)
fχ0 = χ0**-2 * ( 2 -(1+χ0)**0.5 - ((1-χ0)*H0)**0.5 )
fχso = χso**-2 * ( 2 - (1+χso)**0.5 - ((1-χso)*Hso)**0.5 )
H0 = H(χ0-1)
Hso = H(χso-1)
ε2 = A/(ħω)**2 * ( ((ħω-E0)*H0)**0.5 + 0.5*((ħω-E0-Δ0)*Hso)**0.5)
ε1 = A*E0**-1.5 * (fχ0+0.5*(E0/(E0+Δ0))**1.5*fχso)
return ε1 + 1j*ε2
def Epsilon_B(ħω): #E1
χ1 = ħω/E1
χ1s = ħω/(E1+Δ1)
H1 = H(1-χ1)
H1s = H(1-χ1s)
ε2 = ( π*χ1**-2*(B1-B11*((E1-ħω)*H1)**0.5)
+ π*χ1s**-2*(B2-B21*((E1+Δ1-ħω)*H1s)**0.5) )
ε2 *= H(ε2) #undocumented trick: ignore negative ε2
χ1 = (ħω+1j*Γ)/E1
χ1s = (ħω+1j*Γ)/(E1+Δ1)
ε1 = -B1*χ1**-2*np.log(1-χ1**2) - B2*χ1s**-2*np.log(1-χ1s**2)
return ε1.real + 1j*ε2.real
def Epsilon_C(ħω): #E2
χ2 = ħω/E2
ε2 = C*χ2*γ / ((1-χ2**2)**2+(χ2*γ)**2)
ε1 = C*(1-χ2**2) / ((1-χ2**2)**2+(χ2*γ)**2)
return ε1 + 1j*ε2
def Epsilon_D(ħω): #Eg
# ignoring ħωq - no data in the paper
Ech = E1
χg = Eg/ħω
χch = ħω/Ech
Hg = H(1-χg)
Hch = H(1-χch)
ε2 = D/ħω**2 * (ħω-Eg)**2 * Hg * Hch
return 1j*ε2
ev_min=0.1
ev_max=6
npoints=200
eV = np.linspace(ev_min, ev_max, npoints)
μm = 4.13566733e-1*2.99792458/eV
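# Added note: 4.13566733e-1 * 2.99792458 ≈ 1.23984, i.e. h*c in eV*μm, so this
# converts photon energy in eV to vacuum wavelength in μm (λ = h*c/E).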
εA = Epsilon_A(eV)
εB = Epsilon_B(eV)
εC = Epsilon_C(eV)
εD = Epsilon_D(eV)
ε = εA + εB + εC + εD + εinf
n = (ε**.5).real
k = (ε**.5).imag
α = 4*π*k/μm*1e4 #1/cm
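# Added note: absorption coefficient from α = 4πk/λ; with λ in μm, the factor
# 1e4 converts 1/μm to 1/cm.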
#============================ DATA OUTPUT =================================
file = open('out.txt', 'w')
for i in range(npoints-1, -1, -1):
file.write('\n {:.4e} {:.4e} {:.4e}'.format(μm[i],n[i],k[i]))
file.close()
#=============================== PLOT =====================================
plt.rc('font', family='Arial', size='14')
#plot ε1 vs eV
plt.figure(1)
plt.plot(eV, ε.real, label="ε1")
plt.plot(eV, εA.real, label="Re(εA)")
plt.plot(eV, εB.real, label="Re(εB)")
plt.plot(eV, εC.real, label="Re(εC)")
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε1')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot ε2 vs eV
plt.figure(2)
plt.plot(eV, ε.imag, label="ε2")
plt.plot(eV, εA.imag, label="Im(εA)")
plt.plot(eV, εB.imag, label="Im(εB)")
plt.plot(eV, εC.imag, label="Im(εC)")
plt.plot(eV, εD.imag, label="Im(εD)")
plt.yscale('log')
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε2')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
plt.ylim([1e-2,1e2])
#plot n,k vs eV
plt.figure(3)
plt.plot(eV, n, label="n")
plt.plot(eV, k, label="k")
plt.xlabel('Photon energy (eV)')
plt.ylabel('n, k')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs μm
plt.figure(4)
plt.plot(μm, n, label="n")
plt.plot(μm, k, label="k")
plt.xlabel('Wavelength (μm)')
plt.ylabel('n, k')
plt.xscale('log')
plt.yscale('log')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot α vs eV
plt.figure(7)
plt.plot(eV,α)
plt.yscale('log')
plt.ylim([1e3,1e7])
plt.xlabel('Photon energy (eV)')
plt.ylabel('α (1/cm)') | gpl-3.0 |
georgetown-analytics/machine-learning | examples/FrancoMBM/Wine.py | 1 | 6158 |
# importing libraries
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import IPython
from IPython.display import display
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import Normalizer
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report
# importing datasets
raw_df_red = pd.read_csv(r"C:\Users\franc\Desktop\SecondDesk\DataScienceCertificate\Classes\Assingments\MachineLearning\Homework\winequality-red.csv", sep =';')
raw_df_white = pd.read_csv(r"C:\Users\franc\Desktop\SecondDesk\DataScienceCertificate\Classes\Assingments\MachineLearning\Homework\winequality-white.csv", sep =';')
# exploring datasets
raw_df_red.describe()
raw_df_white.describe()
raw_df_white.info()
#-------------------------------white wine selection---------------------------
X = raw_df_white.iloc[:,:-1].values # independent variables X
y = raw_df_white['quality'].values # dependent Variables y
X_train_white, X_test_white, y_train_white, y_test_white = train_test_split(X, y, test_size = 0.2, random_state = 0)
# visual data exploration
X_train = raw_df_white.iloc[:,:-1]
y_train = raw_df_white['quality']
pd.plotting.scatter_matrix(X_train, c = y_train, figsize = (30, 30), marker ='o', hist_kwds = {'bins': 20},
s = 60, alpha = 0.7)
#before scaling
plt.boxplot(X_train_white, manage_xticks = False)
plt.yscale("symlog")
plt.xlabel("Features")
plt.ylabel("Target Variable")
plt.show()
scaler = StandardScaler()
#scaler = MinMaxScaler()
#scaler = Normalizer()
X_train_white = scaler.fit(X_train_white).transform(X_train_white)
X_test_white = scaler.fit(X_test_white).transform(X_test_white)
# after scaling
plt.boxplot(X_train_white, manage_xticks = False)
plt.yscale("symlog")
plt.xlabel("Features")
plt.ylabel("Target Variable")
plt.show()
# performing PCA
from sklearn.decomposition import PCA
pca = PCA(n_components = None) # input a number for feature extraction
X_train_white = pca.fit_transform(X_train_white)
X_test_white = pca.transform(X_test_white)
explained_var = pca.explained_variance_ratio_
print (explained_var)
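# Editor's sketch (optional follow-up): the cumulative explained variance helps
# choose a fixed n_components before re-running PCA for feature extraction.
cumulative_var = np.cumsum(explained_var)
print ("Cumulative explained variance: {}".format(cumulative_var))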
#-----------------KNN--------------------------------------
knn = KNeighborsClassifier(n_neighbors = 10, metric = 'manhattan', weights = 'distance', algorithm = 'auto')
knn.fit(X_train_white, y_train_white)
predicted_knn = knn.predict(X_test_white)
# print("Predictions: {}".format(predicted_knn))
scores = cross_val_score(knn, X = X_train_white, y = y_train_white)
print ("Cross Validation Scores: {}".format(scores))
report = classification_report(y_test_white, predicted_knn)
print (report)
# Finding the best parameters for knn:
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
params2 = [{'n_neighbors': [1,10,50,100], 'algorithm': ['auto','ball_tree','kd_tree' ],
'weights': ['uniform', 'distance'], 'metric': ['minkowski', 'manhattan']}]
grid_search = GridSearchCV(estimator = knn, param_grid = params2, scoring = 'accuracy', cv = 5, n_jobs = 1)
grid_search = grid_search.fit(X_train_white, y_train_white)
accuracy = grid_search.best_score_
best_params = grid_search.best_params_
print(accuracy)
print(best_params)
train_accuracy = []
test_accuracy = []
neighbors = range(1,100,10)
algorithms = ['auto', 'ball_tree', 'kd_tree']
weights = ['uniform', 'distance']
for i in neighbors:
knn = KNeighborsClassifier(n_neighbors = i, metric = 'manhattan', weights = 'distance', algorithm = 'auto')
knn.fit(X_train_white, y_train_white)
train_accuracy.append(knn.score(X_train_white, y_train_white))
test_accuracy.append(knn.score(X_test_white, y_test_white))
plt.plot(neighbors, train_accuracy, label = 'Train set accuracy')
plt.plot(neighbors, test_accuracy, label = 'Test set accuracy')
plt.ylabel("Accuracy")
plt.xlabel("Number of neighbors")
plt.legend()
plt.show()
#------------------------------- Kernel SVC:----------------------------------
from sklearn.svm import SVC
svm = SVC(C = 1000, kernel = 'rbf', gamma = 1)
svm.fit(X_train_white, y_train_white)
predicted = svm.predict(X_test_white)
#print("Predictions: {}".format(predicted))
scores = cross_val_score(svm, X = X_train_white, y = y_train_white)
report = classification_report(y_test_white, predicted)
print (report)
print ("Cross Validation Scores: {}".format(scores))
# -----------Finding the best parameters for SVC----------
params = [{'C': [1, 10, 100, 1000], 'kernel': ['rbf'], 'gamma': [1, 0.1, 0.01, 0.001]}]
grid_search = GridSearchCV(estimator = svm, param_grid = params, scoring = 'accuracy', cv = 5, n_jobs =1)
grid_search = grid_search.fit(X_train_white, y_train_white)
accuracySVC = grid_search.best_score_
best_paramsSVC = grid_search.best_params_
print (accuracySVC)
print (best_paramsSVC)
train_accuracy = []
test_accuracy = []
Ci = [10, 100, 1000]
for i in Ci:
svm = SVC(C = i, kernel = 'rbf', gamma = 1) # try rbf, linear and poly
svm.fit(X_train_white, y_train_white)
train_accuracy.append(svm.score(X_train_white, y_train_white))
test_accuracy.append(svm.score(X_test_white, y_test_white))
plt.plot(Ci, train_accuracy, label = 'Train set accuracy')
plt.plot(Ci, test_accuracy, label = 'Test set accuracy')
plt.ylabel("Accuracy")
plt.xlabel("C")
plt.legend()
plt.show()
####---------XGBoost-----------------
from xgboost.sklearn import XGBClassifier
from xgboost.sklearn import XGBRegressor
xclas = XGBClassifier() # for classifier
xclas.fit(X_train_white, y_train_white)
y_pred = xclas.predict(X_test_white)
scores_xgb = cross_val_score(xclas, X_train_white, y_train_white)
print ("Cross Validation Scores: {}".format(scores_xgb))
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test_white, y_pred)
print (cm)
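# Editor's addition for parity with the KNN and SVC sections above: summarize
# the XGBoost predictions with the same classification_report already imported.
report_xgb = classification_report(y_test_white, y_pred)
print (report_xgb)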
| mit |
shyamalschandra/scikit-learn | sklearn/datasets/species_distributions.py | 64 | 7917 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.datasets.base import _pkl_filepath
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = DIRECTORY_URL + "samples.zip"
COVERAGES_URL = DIRECTORY_URL + "coverages.zip"
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b'NODATA_value'])
if nodata != -9999:
        M[M == nodata] = -9999  # remap missing-data cells to the standard sentinel
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
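# Usage sketch (editor's note): the two 1-D arrays are typically expanded with
# np.meshgrid before plotting the coverages, e.g.
#   xgrid, ygrid = construct_grids(batch)
#   X, Y = np.meshgrid(xgrid, ygrid[::-1])   # reverse y so latitude increases upward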
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
       also known as the Forest Small Rice Rat, a rodent that lives in
       Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
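# Minimal usage sketch (editor's note, not executed here):
#   data = fetch_species_distributions()
#   xgrid, ygrid = construct_grids(data)
#   data.coverages.shape   # (14, 1592, 1212), as described in the docstring above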
| bsd-3-clause |
jpautom/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
bpow/gemini | gemini/scripts/gemini_install.py | 2 | 18433 | #!/usr/bin/env python
"""Installer for gemini: a lightweight db framework for disease and population genetics.
https://github.com/arq5x/gemini
Handles installation of:
- Required third party software
- Required Python libraries
- Gemini application
- Associated data files
Requires: Python 2.7 (or 2.6 and argparse), git, and compilers (gcc, g++)
Run gemini_install.py -h for usage.
"""
import argparse
import platform
import os
import shutil
import subprocess
import sys
import urllib2
import urllib
remotes = {"requirements_pip": "https://raw.github.com/arq5x/gemini/master/requirements.txt",
"requirements_conda": "",
"versioned_installations": "https://raw.githubusercontent.com/arq5x/gemini/master/versioning/",
"cloudbiolinux": "https://github.com/chapmanb/cloudbiolinux.git",
"gemini": "https://github.com/arq5x/gemini.git",
"anaconda": "http://repo.continuum.io/miniconda/Miniconda-3.7.0-%s-x86%s.sh"}
remotes_dev = remotes.copy()
remotes_dev.update({
"requirements_pip": "https://raw.github.com/arq5x/gemini/dev/requirements.txt",
"gemini": "git+https://github.com/arq5x/gemini.git@dev",
"requirements_conda": "https://raw.githubusercontent.com/arq5x/gemini/dev/versioning/unstable/requirements_conda.txt",
})
remotes_bp = remotes_dev
remotes_bp.update({
"requirements_pip": "https://raw.github.com/brentp/gemini/dev/requirements.txt",
"gemini": "git+https://github.com/brentp/gemini.git@dev",
"requirements_conda": "https://raw.githubusercontent.com/brentp/gemini/dev/versioning/unstable/requirements_conda.txt",
})
def main(args, remotes=remotes):
check_dependencies()
clean_env_variables()
work_dir = os.path.join(os.getcwd(), "tmpgemini_install")
if not os.path.exists(work_dir):
os.makedirs(work_dir)
os.chdir(work_dir)
if args.gemini_version in ("unstable", "bp"):
if args.gemini_version == "unstable":
remotes = remotes_dev
else:
remotes = remotes_bp
requirements_pip = remotes['requirements_pip']
requirements_conda = remotes['requirements_conda']
urllib.urlretrieve(requirements_pip, filename='_pip_dev.txt')
urllib.urlretrieve(requirements_conda, filename='_conda_dev.txt')
# quick hack to support testing installs:
if args.gemini_version == "bp":
for f in ('_pip_dev.txt', '_conda_dev.txt'):
contents = open(f).read().replace('arq5x', 'brentp')
with open(f, 'w') as fh:
fh.write(contents)
remotes.update({'requirements_pip': '_pip_dev.txt', 'requirements_conda': '_conda_dev.txt'})
elif args.gemini_version != 'latest':
requirements_pip = os.path.join(remotes['versioned_installations'],
args.gemini_version, 'requirements_pip.txt')
requirements_conda = os.path.join(remotes['versioned_installations'],
args.gemini_version, 'requirements_conda.txt')
try:
urllib2.urlopen(requirements_pip)
except:
sys.exit('Gemini version %s could not be found. Try the latest version.' % args.gemini_version)
remotes.update({'requirements_pip': requirements_pip, 'requirements_conda': requirements_conda})
print "Installing isolated base python installation"
make_dirs(args)
anaconda = install_anaconda_python(args, remotes)
print "Installing gemini..."
install_conda_pkgs(anaconda, remotes, args)
gemini = install_gemini(anaconda, remotes, args.datadir, args.tooldir, args.sudo)
if args.install_tools:
cbl = get_cloudbiolinux(remotes["cloudbiolinux"])
fabricrc = write_fabricrc(cbl["fabricrc"], args.tooldir, args.datadir, args.sudo)
print "Installing associated tools..."
install_tools(gemini["fab"], cbl["tool_fabfile"], fabricrc)
os.chdir(work_dir)
install_data(gemini["python"], gemini["data_script"], args)
os.chdir(work_dir)
test_script = install_testbase(args.datadir, remotes["gemini"], gemini)
print "Finished: gemini, tools and data installed"
print " Tools installed in:\n %s" % args.tooldir
print " Data installed in:\n %s" % args.datadir
print " Run tests with:\n cd %s && bash %s" % (os.path.dirname(test_script),
os.path.basename(test_script))
print " NOTE: be sure to add %s/bin to your PATH." % args.tooldir
print " NOTE: Install data files for GERP_bp & CADD_scores (not installed by default).\n "
shutil.rmtree(work_dir)
def install_gemini(anaconda, remotes, datadir, tooldir, use_sudo):
"""Install gemini plus python dependencies inside isolated Anaconda environment.
"""
# Work around issue with distribute where asks for 'distribute==0.0'
# try:
# subprocess.check_call([anaconda["easy_install"], "--upgrade", "distribute"])
# except subprocess.CalledProcessError:
# try:
# subprocess.check_call([anaconda["pip"], "install", "--upgrade", "distribute"])
# except subprocess.CalledProcessError:
# pass
# Ensure latest version of fabric for running CloudBioLinux
subprocess.check_call([anaconda["pip"], "install", "fabric>=1.7.0"])
# allow downloads excluded in recent pip (1.5 or greater) versions
try:
p = subprocess.Popen([anaconda["pip"], "--version"], stdout=subprocess.PIPE)
pip_version = p.communicate()[0].split()[1]
except:
pip_version = ""
pip_compat = []
if pip_version >= "1.5":
for req in ["python-graph-core", "python-graph-dot"]:
pip_compat += ["--allow-external", req, "--allow-unverified", req]
# Set PIP SSL certificate to installed conda certificate to avoid SSL errors
cert_file = os.path.join(anaconda["dir"], "ssl", "cert.pem")
if os.path.exists(cert_file):
os.environ["PIP_CERT"] = cert_file
subprocess.check_call([anaconda["pip"], "install"] + pip_compat + ["-r", remotes["requirements_pip"]])
python_bin = os.path.join(anaconda["dir"], "bin", "python")
_cleanup_problem_files(anaconda["dir"])
_add_missing_inits(python_bin)
for final_name, ve_name in [("gemini", "gemini"), ("gemini_python", "python"),
("gemini_pip", "pip")]:
final_script = os.path.join(tooldir, "bin", final_name)
ve_script = os.path.join(anaconda["dir"], "bin", ve_name)
sudo_cmd = ["sudo"] if use_sudo else []
if os.path.lexists(final_script):
subprocess.check_call(sudo_cmd + ["rm", "-f", final_script])
else:
subprocess.check_call(sudo_cmd + ["mkdir", "-p", os.path.dirname(final_script)])
cmd = ["ln", "-s", ve_script, final_script]
subprocess.check_call(sudo_cmd + cmd)
library_loc = check_output("%s -c 'import gemini; print gemini.__file__'" % python_bin,
shell=True)
return {"fab": os.path.join(anaconda["dir"], "bin", "fab"),
"data_script": os.path.join(os.path.dirname(library_loc.strip()), "install-data.py"),
"python": python_bin,
"cmd": os.path.join(anaconda["dir"], "bin", "gemini")}
def install_conda_pkgs(anaconda, remotes, args):
if args.gemini_version != 'latest':
pkgs = ["--file", remotes['requirements_conda']]
else:
pkgs = ["bcolz", "conda", "cython", "ipyparallel",
"jinja2", "nose", "numexpr", "numpy", "openssl", "pip", "pybedtools",
"pycrypto", "pyparsing", "python-graph-core", "python-graph-dot",
"pyyaml", "pyzmq", "pandas", "scipy"]
if platform.architecture()[0] != "32bit":
pkgs += ["bx-python", "pysam", "ipython-cluster-helper"]
channels = ["-c", "https://conda.binstar.org/bcbio"]
subprocess.check_call([anaconda["conda"], "install", "--yes"] + channels + pkgs)
def install_anaconda_python(args, remotes):
"""Provide isolated installation of Anaconda python.
http://docs.continuum.io/anaconda/index.html
"""
anaconda_dir = os.path.join(args.datadir, "anaconda")
bindir = os.path.join(anaconda_dir, "bin")
conda = os.path.join(bindir, "conda")
if platform.mac_ver()[0]:
distribution = "macosx"
else:
distribution = "linux"
if platform.architecture()[0] == "32bit":
arch = ""
else:
arch = "_64"
if not os.path.exists(anaconda_dir) or not os.path.exists(conda):
if os.path.exists(anaconda_dir):
shutil.rmtree(anaconda_dir)
url = remotes["anaconda"] % ("MacOSX" if distribution == "macosx" else "Linux", arch)
if not os.path.exists(os.path.basename(url)):
subprocess.check_call(["wget", url])
subprocess.check_call("bash %s -b -p %s" %
(os.path.basename(url), anaconda_dir), shell=True)
return {"conda": conda,
"pip": os.path.join(bindir, "pip"),
"easy_install": os.path.join(bindir, "easy_install"),
"dir": anaconda_dir}
def _add_missing_inits(python_bin):
"""pip/setuptools strips __init__.py files with namespace declarations.
I have no idea why, but this adds them back, skipping if compiled into an egg.
"""
library_loc = check_output("%s -c 'import pygraph.classes.graph; "
"print pygraph.classes.graph.__file__'" % python_bin,
shell=True)
pygraph_init = os.path.normpath(os.path.join(os.path.dirname(library_loc.strip()), os.pardir,
"__init__.py"))
if not os.path.exists(pygraph_init):
if os.path.isdir(os.path.dirname(pygraph_init)):
with open(pygraph_init, "w") as out_handle:
out_handle.write("__import__('pkg_resources').declare_namespace(__name__)\n")
def _cleanup_problem_files(venv_dir):
"""Remove problem bottle items in PATH which conflict with site-packages
"""
for cmd in ["bottle.py", "bottle.pyc"]:
bin_cmd = os.path.join(venv_dir, "bin", cmd)
if os.path.exists(bin_cmd):
os.remove(bin_cmd)
def install_tools(fab_cmd, fabfile, fabricrc):
"""Install 3rd party tools used by Gemini using a custom CloudBioLinux flavor.
"""
tools = ["grabix"]
flavor_dir = os.path.join(os.getcwd(), "gemini-flavor")
if not os.path.exists(flavor_dir):
os.makedirs(flavor_dir)
with open(os.path.join(flavor_dir, "main.yaml"), "w") as out_handle:
out_handle.write("packages:\n")
out_handle.write(" - bio_nextgen\n")
out_handle.write("libraries:\n")
with open(os.path.join(flavor_dir, "custom.yaml"), "w") as out_handle:
out_handle.write("bio_nextgen:\n")
for tool in tools:
out_handle.write(" - %s\n" % tool)
cmd = [fab_cmd, "-f", fabfile, "-H", "localhost", "-c", fabricrc,
"install_biolinux:target=custom,flavor=%s" % flavor_dir]
subprocess.check_call(cmd)
def install_data(python_cmd, data_script, args):
"""Install biological data used by gemini.
"""
data_dir = os.path.join(args.datadir, "gemini_data") if args.sharedpy else args.datadir
cmd = [python_cmd, data_script, data_dir]
if args.install_data:
print "Installing gemini data..."
else:
cmd.append("--nodata")
subprocess.check_call(cmd)
def install_testbase(datadir, repo, gemini):
"""Clone or update gemini code so we have the latest test suite.
"""
gemini_dir = os.path.join(datadir, "gemini")
cur_dir = os.getcwd()
needs_git = True
if os.path.exists(gemini_dir):
os.chdir(gemini_dir)
try:
subprocess.check_call(["git", "pull", "origin", "master", "--tags"])
needs_git = False
except:
os.chdir(cur_dir)
shutil.move(gemini_dir, "gtmp")
branch = None
if needs_git:
os.chdir(os.path.split(gemini_dir)[0])
if repo.startswith("git+"):
repo = repo[4:]
if repo.endswith("@dev"):
url, branch = repo.rsplit("@", 1)
subprocess.check_call(["git", "clone", "-b", branch, url])
else:
subprocess.check_call(["git", "clone", repo])
os.makedirs(os.path.join(gemini_dir, "data"))
if os.path.exists(os.path.join(cur_dir, "gtmp", "data")):
for f in os.listdir(os.path.join(cur_dir, "gtmp", "data")):
shutil.move(os.path.join(cur_dir, "gtmp", "data", f), os.path.join(gemini_dir, "data"))
#shutil.move(os.path.join(cur_dir, "gtmp"), gemini_dir)
shutil.rmtree(os.path.join(cur_dir, "gtmp", "data"))
os.chdir(gemini_dir)
if branch is None: # otherwise, we use the test structure at current head.
_update_testdir_revision(gemini["cmd"])
os.chdir(cur_dir)
return os.path.join(gemini_dir, "master-test.sh")
def _update_testdir_revision(gemini_cmd):
"""Update test directory to be in sync with a tagged installed version or development.
"""
try:
p = subprocess.Popen([gemini_cmd, "--version"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
gversion = p.communicate()[0].split()[1]
except:
gversion = ""
tag = ""
if gversion:
try:
p = subprocess.Popen("git tag -l | grep %s" % gversion, stdout=subprocess.PIPE, shell=True)
tag = p.communicate()[0].strip()
except:
tag = ""
if tag:
subprocess.check_call(["git", "checkout", "tags/%s" % tag])
else:
subprocess.check_call(["git", "reset", "--hard", "HEAD"])
def write_fabricrc(base_file, tooldir, datadir, use_sudo):
out_file = os.path.join(os.getcwd(), os.path.basename(base_file))
with open(base_file) as in_handle:
with open(out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("system_install"):
line = "system_install = %s\n" % tooldir
elif line.startswith("local_install"):
line = "local_install = %s/install\n" % tooldir
elif line.startswith("data_files"):
line = "data_files = %s\n" % datadir
elif line.startswith("use_sudo"):
line = "use_sudo = %s\n" % use_sudo
elif line.startswith("edition"):
line = "edition = minimal\n"
elif line.startswith("#galaxy_home"):
line = "galaxy_home = %s\n" % os.path.join(datadir, "galaxy")
out_handle.write(line)
return out_file
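# Illustration (added; directory values below are hypothetical): with
# tooldir=/usr/local and datadir=/usr/local/share/gemini, write_fabricrc()
# rewrites the copied fabricrc.txt so that it contains lines such as
#
#   system_install = /usr/local
#   local_install = /usr/local/install
#   data_files = /usr/local/share/gemini
#   use_sudo = True
#   edition = minimal
#   galaxy_home = /usr/local/share/gemini/galaxy
#
# and that copy is what install_tools() passes on to CloudBioLinux.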
def make_dirs(args):
sudo_cmd = ["sudo"] if args.sudo else []
for dname in [args.datadir, args.tooldir]:
if not os.path.exists(dname):
subprocess.check_call(sudo_cmd + ["mkdir", "-p", dname])
username = check_output("echo $USER", shell=True).strip()
subprocess.check_call(sudo_cmd + ["chown", username, dname])
def get_cloudbiolinux(repo):
base_dir = os.path.join(os.getcwd(), "cloudbiolinux")
if not os.path.exists(base_dir):
subprocess.check_call(["git", "clone", repo])
return {"fabricrc": os.path.join(base_dir, "config", "fabricrc.txt"),
"tool_fabfile": os.path.join(base_dir, "fabfile.py")}
def clean_env_variables():
"""Adjust environmental variables which can cause conflicts with installed anaconda python.
"""
for k in ["PYTHONPATH", "PYTHONHOME"]:
os.environ.pop(k, None)
# https://docs.python.org/2/using/cmdline.html#envvar-PYTHONNOUSERSITE
os.environ["PYTHONNOUSERSITE"] = "1"
def check_dependencies():
"""Ensure required tools for installation are present.
"""
print "Checking required dependencies..."
for cmd, url in [("git", "http://git-scm.com/"),
("wget", "http://www.gnu.org/software/wget/"),
("curl", "http://curl.haxx.se/")]:
try:
retcode = subprocess.call([cmd, "--version"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except OSError:
retcode = 127
if retcode == 127:
raise OSError("gemini requires %s (%s)" % (cmd, url))
else:
print " %s found" % cmd
def check_output(*popenargs, **kwargs):
"""python2.6 compatible version of check_output.
Thanks to:
https://github.com/stackforge/bindep/blob/master/bindep/support_py26.py
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
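# Example usage of the python2.6-compatible helper above (illustrative only;
# it mirrors subprocess.check_output semantics), matching how make_dirs() and
# install_gemini() call it elsewhere in this script:
#
#   username = check_output("echo $USER", shell=True).strip()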
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Automated installer for gemini framework.")
parser.add_argument("tooldir", help="Directory to install 3rd party software tools",
type=os.path.abspath)
parser.add_argument("datadir", help="Directory to install gemini data files",
type=os.path.abspath)
parser.add_argument("--gemini-version", dest="gemini_version", default="latest",
help="Install one specific gemini version with a fixed dependency chain.")
parser.add_argument("--nosudo", help="Specify we cannot use sudo for commands",
dest="sudo", action="store_false", default=True)
parser.add_argument("--notools", help="Do not install tool dependencies",
dest="install_tools", action="store_false", default=True)
parser.add_argument("--nodata", help="Do not install data dependencies",
dest="install_data", action="store_false", default=True)
parser.add_argument("--sharedpy", help=("Indicate we share an Anaconda Python directory with "
"another project. Creates unique gemini data directory."),
action="store_true", default=False)
if len(sys.argv) == 1:
parser.print_help()
else:
main(parser.parse_args())
| mit |
machinelearningnanodegree/stanford-cs231 | solutions/levin/assignment2/FullyConectedNets/batchnormalization.py | 1 | 14980 | import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import random
import numpy as np
import matplotlib.pyplot as plt
from assignment2.cs231n.layers import affine_forward
from assignment2.cs231n.layers import affine_backward
from assignment2.cs231n.layers import relu_forward
from assignment2.cs231n.layers import relu_backward
from assignment2.cs231n.layers import svm_loss
from assignment2.cs231n.layers import softmax_loss
from assignment2.cs231n.classifiers.fc_net import *
from assignment2.cs231n.data_utils import get_CIFAR10_data
from assignment2.cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from assignment2.cs231n.solver import Solver
from assignment2.cs231n.layer_utils import affine_relu_forward, affine_relu_backward
from assignment2.cs231n.data_utils import load_CIFAR10
from assignment2.cs231n.optim import sgd_momentum
from assignment2.cs231n.optim import rmsprop
from assignment2.cs231n.optim import adam
import time
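# The checks below exercise batchnorm_forward/batchnorm_backward imported from
# the assignment's cs231n.layers module, which is not included in this file.
# The sketch below is an added, minimal train-time forward pass for reference
# only -- an assumption about that implementation, not the assignment's actual
# code -- and it omits the running mean/variance bookkeeping needed for
# test-time mode.
def _batchnorm_forward_train_sketch(x, gamma, beta, eps=1e-5):
    # Normalize each feature column to zero mean / unit variance over the
    # batch, then scale by gamma and shift by beta.
    mu = x.mean(axis=0)
    var = x.var(axis=0)
    x_hat = (x - mu) / np.sqrt(var + eps)
    out = gamma * x_hat + beta
    # A full implementation also returns a cache (x_hat, gamma, var, eps, ...)
    # for the backward pass.
    return out
# For reference, the compact gradient checked by analytical_backward() below is
# usually written as
#   dx = gamma / (N * sqrt(var + eps)) *
#        (N * dout - dout.sum(0) - x_hat * (dout * x_hat).sum(0))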
class BatchNormalization(object):
def __init__(self):
return
def rel_error(self, x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def test_batch_norm_forward_train_time(self):
# Check the training-time forward pass by checking means and variances
# of features both before and after batch normalization
# Simulate the forward pass for a two-layer network
N, D1, D2, D3 = 200, 50, 60, 3
X = np.random.randn(N, D1)
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
a = np.maximum(0, X.dot(W1)).dot(W2)
print 'Before batch normalization:'
print ' means: ', a.mean(axis=0)
print ' stds: ', a.std(axis=0)
# Means should be close to zero and stds close to one
print 'After batch normalization (gamma=1, beta=0)'
a_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})
print ' mean: ', a_norm.mean(axis=0)
print ' std: ', a_norm.std(axis=0)
# Now means should be close to beta and stds close to gamma
gamma = np.asarray([1.0, 2.0, 3.0])
beta = np.asarray([11.0, 12.0, 13.0])
a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})
print 'After batch normalization (nontrivial gamma, beta)'
print ' means: ', a_norm.mean(axis=0)
print ' stds: ', a_norm.std(axis=0)
return
def test_batch_norm_forward_test_time(self):
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
N, D1, D2, D3 = 200, 50, 60, 3
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
bn_param = {'mode': 'train'}
gamma = np.ones(D3)
beta = np.zeros(D3)
for t in xrange(50):
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
batchnorm_forward(a, gamma, beta, bn_param)
bn_param['mode'] = 'test'
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
print 'After batch normalization (test-time):'
print ' means: ', a_norm.mean(axis=0)
print ' stds: ', a_norm.std(axis=0)
return
def get_CIFAR10_data(self, num_training=49000, num_validation=1000, num_test=1000):
"""
Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
it for the two-layer neural net classifier. These are the same steps as
we used for the SVM, but condensed to a single function.
"""
# Load the raw CIFAR-10 data
cifar10_dir = '../../assignment1/cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Normalize the data: subtract the mean image
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
# Reshape data to rows
X_train = X_train.reshape(num_training, -1)
X_val = X_val.reshape(num_validation, -1)
X_test = X_test.reshape(num_test, -1)
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# self.X_train = X_train
# self.y_train = y_train
# self.X_val = X_val
# self.y_val = y_val
# self.X_test = X_test
# self.y_test = y_test
self.data = {
'X_train': X_train,
'y_train': y_train,
'X_val': X_val,
'y_val': y_val}
return X_train, y_train, X_val, y_val,X_test,y_test
def backnorm_backward(self):
# Gradient check batchnorm backward pass
N, D = 4, 5
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda a: batchnorm_forward(x, gamma, beta, bn_param)[0]
fb = lambda b: batchnorm_forward(x, gamma, beta, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma, dout)
db_num = eval_numerical_gradient_array(fb, beta, dout)
_, cache = batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = batchnorm_backward(dout, cache)
print 'dx error: ', self.rel_error(dx_num, dx)
print 'dgamma error: ', self.rel_error(da_num, dgamma)
print 'dbeta error: ', self.rel_error(db_num, dbeta)
return
def analytical_backward(self):
N, D = 100, 500
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
out, cache = batchnorm_forward(x, gamma, beta, bn_param)
t1 = time.time()
dx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)
t2 = time.time()
dx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)
t3 = time.time()
print 'dx difference: ', self.rel_error(dx1, dx2)
print 'dgamma difference: ', self.rel_error(dgamma1, dgamma2)
print 'dbeta difference: ', self.rel_error(dbeta1, dbeta2)
print 'speedup: %.2fx' % ((t2 - t1) / (t3 - t2))
return
def check_network_withbatchnorm(self):
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print 'Running check with reg = ', reg
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2, dtype=np.float64,
use_batchnorm=True)
loss, grads = model.loss(X, y)
print 'Initial loss: ', loss
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print '%s relative error: %.2e' % (name, self.rel_error(grad_num, grads[name]))
if reg == 0: print
return
def batch_norm_with_deep(self):
# Try training a very deep net with batchnorm
data = self.data
hidden_dims = [100, 100, 100, 100, 100]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
weight_scale = 2e-2
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
bn_solver.train()
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
solver.train()
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label='baseline')
plt.plot(bn_solver.loss_history, 'o', label='batchnorm')
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label='baseline')
plt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label='baseline')
plt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
return
def weight_initialization_batch_norm(self):
# Try training a very deep net with batchnorm
data = self.data
hidden_dims = [50, 50, 50, 50, 50, 50, 50]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
bn_solvers = {}
solvers = {}
weight_scales = np.logspace(-4, 0, num=20)
for i, weight_scale in enumerate(weight_scales):
print 'Running weight scale %d / %d' % (i + 1, len(weight_scales))
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
bn_solver.train()
bn_solvers[weight_scale] = bn_solver
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
solver.train()
solvers[weight_scale] = solver
# Plot results of weight scale experiment
best_train_accs, bn_best_train_accs = [], []
best_val_accs, bn_best_val_accs = [], []
final_train_loss, bn_final_train_loss = [], []
for ws in weight_scales:
best_train_accs.append(max(solvers[ws].train_acc_history))
bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))
best_val_accs.append(max(solvers[ws].val_acc_history))
bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))
final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))
bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history[-100:]))
plt.subplot(3, 1, 1)
plt.title('Best val accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best val accuracy')
plt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')
plt.legend(ncol=2, loc='lower right')
plt.subplot(3, 1, 2)
plt.title('Best train accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best training accuracy')
plt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')
plt.legend()
plt.subplot(3, 1, 3)
plt.title('Final training loss vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Final training loss')
plt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')
plt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')
plt.legend()
plt.gcf().set_size_inches(10, 15)
plt.show()
return
def run(self):
self.get_CIFAR10_data()
# self.test_batch_norm_forward_train_time()
# self.test_batch_norm_forward_test_time()
# self.backnorm_backward()
# self.analytical_backward()
# self.check_network_withbatchnorm()
# self.batch_norm_with_deep()
self.weight_initialization_batch_norm()
return
if __name__ == "__main__":
obj= BatchNormalization()
obj.run() | mit |
MicheleDamian/ConnectopicMapping | setup.py | 1 | 1540 | from codecs import open
from os import path
from setuptools import setup, Extension
from Cython.Distutils import build_ext
import numpy
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Requirements
install_requires=['cython>=0.24.1',
'numpy>=1.6.1',
'scipy>=0.16',
'matplotlib>=1.5.1',
'scikit-learn>=0.17.1',
'nibabel>=2.0.2',
'nilearn>=0.2.4',
'GPy>=1.0.7']
setup(
name='connectopic_mapping',
version='0.3.0',
description='Connectopic mapping',
long_description=long_description,
author='Michele Damian',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='neuroscience connectopic mapping research',
packages=['connectopic_mapping'],
install_requires=install_requires,
cmdclass={'build_ext': build_ext},
ext_modules=[Extension("connectopic_mapping.haak", ["connectopic_mapping/haak.pyx"], include_dirs=[numpy.get_include()])],
)
| apache-2.0 |
yarikoptic/pystatsmodels | statsmodels/examples/l1_demo/short_demo.py | 3 | 3644 | """
You can fit your LikelihoodModel using l1 regularization by changing
the method argument and adding an argument alpha. See code for
details.
The Story
---------
The maximum likelihood (ML) solution works well when the number of data
points is large and the noise is small. When the ML solution starts
"breaking", the regularized solution should do better.
The l1 Solvers
--------------
The standard l1 solver is fmin_slsqp and is included with scipy. It
sometimes has trouble verifying convergence when the data size is
large.
The l1_cvxopt_cp solver is part of CVXOPT and this package needs to be
installed separately. It works well even for larger data sizes.
"""
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
import pdb # pdb.set_trace()
## Load the data from Spector and Mazzeo (1980)
spector_data = sm.datasets.spector.load()
spector_data.exog = sm.add_constant(spector_data.exog)
N = len(spector_data.endog)
K = spector_data.exog.shape[1]
### Logit Model
logit_mod = sm.Logit(spector_data.endog, spector_data.exog)
## Standard logistic regression
logit_res = logit_mod.fit()
## Regularized regression
# Set the reularization parameter to something reasonable
alpha = 0.05 * N * np.ones(K)
# Use l1, which solves via a built-in (scipy.optimize) solver
logit_l1_res = logit_mod.fit_regularized(method='l1', alpha=alpha, acc=1e-6)
# Use l1_cvxopt_cp, which solves with a CVXOPT solver
logit_l1_cvxopt_res = logit_mod.fit_regularized(
method='l1_cvxopt_cp', alpha=alpha)
## Print results
print "============ Results for Logit ================="
print "ML results"
print logit_res.summary()
print "l1 results"
print logit_l1_res.summary()
print logit_l1_cvxopt_res.summary()
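# Added illustration (hedged): one practical difference between the ML and l1
# fits is that the l1 penalty can drive coefficients exactly to zero; how many
# (if any) are zeroed here depends on alpha and the trimming settings.
n_zeroed_l1 = np.sum(logit_l1_res.params == 0)
print "Coefficients at exactly zero under l1:", n_zeroed_l1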
### Multinomial Logit Example using American National Election Studies Data
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
mlogit_res = mlogit_mod.fit()
## Set the regularization parameter.
alpha = 10 * np.ones((mlogit_mod.J - 1, mlogit_mod.K))
# Don't regularize the constant
alpha[-1,:] = 0
mlogit_l1_res = mlogit_mod.fit_regularized(method='l1', alpha=alpha)
print mlogit_l1_res.params
#mlogit_l1_res = mlogit_mod.fit_regularized(
# method='l1_cvxopt_cp', alpha=alpha, abstol=1e-10, trim_tol=1e-6)
#print mlogit_l1_res.params
## Print results
print "============ Results for MNLogit ================="
print "ML results"
print mlogit_res.summary()
print "l1 results"
print mlogit_l1_res.summary()
#
#
#### Logit example with many params, sweeping alpha
spector_data = sm.datasets.spector.load()
X = spector_data.exog
Y = spector_data.endog
## Fit
N = 50 # number of points to solve at
K = X.shape[1]
logit_mod = sm.Logit(Y, X)
coeff = np.zeros((N, K)) # Holds the coefficients
alphas = 1 / np.logspace(-0.5, 2, N)
## Sweep alpha and store the coefficients
# QC check doesn't always pass with the default options.
# Use the options QC_verbose=True and disp=True
# to to see what is happening. It just barely doesn't pass, so I decreased
# acc and increased QC_tol to make it pass
for n, alpha in enumerate(alphas):
logit_res = logit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='off', QC_tol=0.1, disp=False,
QC_verbose=True, acc=1e-15)
coeff[n,:] = logit_res.params
## Plot
plt.figure(1);plt.clf();plt.grid()
plt.title('Regularization Path');
plt.xlabel('alpha');
plt.ylabel('Parameter value');
for i in xrange(K):
plt.plot(alphas, coeff[:,i], label='X'+str(i), lw=3)
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
equialgo/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 105 | 4300 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each value
possible value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).randint
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes, size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k, size=n_samples)
labels_b = random_labels(low=0, high=k, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
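# Added illustration (hedged): a single pair of random labelings already shows
# the effect discussed above -- the adjusted Rand index stays near 0 while
# V-measure is biased upward. Exact values depend on the random seed.
_labels_a_demo = np.random.RandomState(0).randint(0, 10, size=50)
_labels_b_demo = np.random.RandomState(1).randint(0, 10, size=50)
print("ARI of two random labelings: %0.3f"
      % metrics.adjusted_rand_score(_labels_a_demo, _labels_b_demo))
print("V-measure of two random labelings: %0.3f"
      % metrics.v_measure_score(_labels_a_demo, _labels_b_demo))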
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
siutanwong/scikit-learn | sklearn/ensemble/tests/test_forest.py | 48 | 35412 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
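# Note (added): the dictionaries above act as registries; the test_* functions
# below are nose-style generators that yield one (check_*, name) pair per
# registered estimator, so every check runs against every forest variant.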
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, X, y):
# Check variable importances.
ForestClassifier = FOREST_CLASSIFIERS[name]
for n_jobs in [1, 2]:
clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
        assert_greater(X_new.shape[1], 0)
        assert_less(X_new.shape[1], X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importances, name, X, y
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0,
n_samples=40)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert():
classifier = RandomForestClassifier()
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y) | bsd-3-clause |
dewtx29/python_ann | project/num/c++/forword.py | 1 | 1567 | from math import cos,sin, factorial
import random
import matplotlib.pyplot as plt
def randominput(n):
listA = []
for i in range(0,n):
ran = random.random()
listB = [i, ran]
listA.append( listB)
return listA
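# Descriptive note (best-effort reading of the code below): forward(listinput, x)
# evaluates a truncated series sum_i f0(x, i) * prod_{j<i} (x - x_j), where f0
# cycles through the derivatives of sin and the nodes x_j are listinput[j][0].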
def forward(listinput, x):
dataSize = len(listinput)
#print listinput , dataSize
summation = 0.0
for i in range (0,dataSize):
tmp = f0(x,i)
term = 1.0
for j in range (0, i):
term = term * (x - listinput[j][0])
summation = summation + (tmp* term)
return summation
#print "summation" , summation
def f0(x, i):
# i-th derivative of sin evaluated at x; the derivatives cycle with period 4
f = 0.0
if i % 4 == 0:
f = sin(x)
elif i % 4 == 1:
f = cos(x)
elif i % 4 == 2:
f = -sin(x)
elif i % 4 == 3:
f = -cos(x)
return f
dotList = []
dotList2 = []
xList = []
cur = 0.0
while (cur < 50.0 ) :
y = sin(cur)
# dotList.append(y)
tmpList = [cur, y]
xList.append(tmpList)
cur = cur + 1
listinput = randominput(20)
print(listinput)
for i in range(0,len(listinput)):
dotList.append(listinput[i][1])
for i in range (0 , len (listinput)):
#print xList[i]
y = forward(listinput , listinput[i][0]);
#print y
dotList2.append(y)
dotList = dotList[0:len(dotList)-3]
dotList2 = dotList2[0:len(dotList2)-3]
plt.plot(dotList, 'ro',color="g")
plt.plot(dotList2, 'ro',color="r")
#plt.axis([0, 1000, 0, 1000])
plt.show()
forward(listinput,1.0)
| gpl-3.0 |
alexsavio/scikit-learn | sklearn/learning_curve.py | 7 | 15161 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the functions are moved."
" This module will be removed in 0.20",
DeprecationWarning)
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0,
error_score='raise'):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually has to
be large enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
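Examples
--------
A minimal usage sketch (illustrative only; the iris data and the linear
SVC are arbitrary choices, and the import assumes the non-deprecated
``sklearn.model_selection`` home of this function):
>>> from sklearn.model_selection import learning_curve
>>> from sklearn.datasets import load_iris
>>> from sklearn.svm import SVC
>>> iris = load_iris()
>>> sizes, tr, te = learning_curve(SVC(kernel='linear'), iris.data, iris.target, cv=5)
>>> tr.shape == (len(sizes), 5)
True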
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True,
error_score=error_score)
for train, test in cv for n_train_samples in train_sizes_abs)
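# In this non-incremental path, 'out' holds one (train_score, test_score)
# pair per (fold, tick) combination, ordered fold-major; the reshape and
# transpose below turn it into two (n_ticks, n_folds) score matrices.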
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
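# np.split cuts the training indices at each tick, so each iteration only
# receives the samples that are new relative to the previous tick; the
# trailing chunk beyond the last tick is dropped by the [:-1].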
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<sphx_glr_auto_examples_model_selection_plot_validation_curve.py>`
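Examples
--------
A minimal usage sketch (illustrative only; the estimator, the swept
``gamma`` parameter and its range are arbitrary choices, and the import
assumes the non-deprecated ``sklearn.model_selection`` home of this
function):
>>> import numpy as np
>>> from sklearn.model_selection import validation_curve
>>> from sklearn.datasets import load_iris
>>> from sklearn.svm import SVC
>>> iris = load_iris()
>>> gammas = np.logspace(-6, -1, 5)
>>> tr, te = validation_curve(SVC(), iris.data, iris.target, param_name='gamma', param_range=gammas, cv=3)
>>> tr.shape == (5, 3)
True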
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
| bsd-3-clause |
PatrickChrist/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example illustrating multi-output regression with a decision tree.
A :ref:`decision tree <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the tree learns overly fine
details of the training data, i.e. it fits the noise and overfits.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
elkingtonmcb/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/numpy/linalg/linalg.py | 3 | 82838 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
atleast_2d, intp, asanyarray, object_, matmul,
swapaxes, divide, count_nonzero, isnan
)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import lapack_lite, _umath_linalg
# For Python2/3 compatibility
_N = b'N'
_V = b'V'
_A = b'A'
_S = b'S'
_L = b'L'
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
def _determine_error_states():
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
return [bufsize, invalid_call_errmask, None]
# Dealing with errors in _umath_linalg
_linalg_error_extobj = _determine_error_states()
del _determine_error_states
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def _raise_linalgerror_lstsq(err, flag):
raise LinAlgError("SVD did not converge in Linear Least Squares")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj) # make a copy
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assertRankAtLeast2(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
m, n = a.shape[-2:]
if m != n:
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _isEmpty2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
def _assertNoEmpty2d(*arrays):
for a in arrays:
if _isEmpty2d(a):
raise LinAlgError("Arrays cannot be empty")
def transpose(a):
"""
Transpose each matrix in a stack of matrices.
Unlike np.transpose, this only swaps the last two axes, rather than all of
them
Parameters
----------
a : (...,M,N) array_like
Returns
-------
aT : (...,N,M) ndarray
"""
return swapaxes(a, -1, -2)
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
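A stacked (broadcast) solve, sketched for illustration with two tiny
systems that share the ``(..., M)`` right-hand-side layout:
>>> a = np.stack([np.eye(2), 2 * np.eye(2)])
>>> b = np.ones((2, 2))
>>> x = np.linalg.solve(a, b)
>>> np.allclose(x, [[1., 1.], [0.5, 0.5]])
True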
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
gufunc = _umath_linalg.solve1
else:
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
def matrix_power(a, n):
"""
Raise a square matrix to the (integer) power `n`.
For positive integers `n`, the power is computed by repeated matrix
squarings and matrix multiplications. If ``n == 0``, the identity matrix
of the same shape as M is returned. If ``n < 0``, the inverse
is computed and then raised to the ``abs(n)``.
.. note:: Stacks of object matrices are not currently supported.
Parameters
----------
a : (..., M, M) array_like
Matrix to be "powered."
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
a**n : (..., M, M) ndarray or matrix object
The return value is the same shape and type as `a`;
if the exponent is positive or zero then the type of the
elements is the same as those of `a`. If the exponent is
negative the elements are floating-point.
Raises
------
LinAlgError
For matrices that are not square or that (for negative powers) cannot
be inverted numerically.
Examples
--------
>>> from numpy.linalg import matrix_power
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
"""
a = asanyarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
try:
n = operator.index(n)
except TypeError:
raise TypeError("exponent must be an integer")
# Fall back on dot for object arrays. Object arrays are not supported by
# the current implementation of matmul using einsum
if a.dtype != object:
fmatmul = matmul
elif a.ndim == 2:
fmatmul = dot
else:
raise NotImplementedError(
"matrix_power not supported for stacks of object arrays")
if n == 0:
a = empty_like(a)
a[...] = eye(a.shape[-2], dtype=a.dtype)
return a
elif n < 0:
a = inv(a)
n = abs(n)
# short-cuts.
if n == 1:
return a
elif n == 2:
return fmatmul(a, a)
elif n == 3:
return fmatmul(fmatmul(a, a), a)
# Use binary decomposition to reduce the number of matrix multiplications.
# Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
# increasing powers of 2, and multiply into the result as needed.
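# For example, with n = 5 (binary 101) the loop multiplies a**1 into the
# result, squares to a**2 without using it, then squares to a**4 and
# multiplies it in, giving a**5 with three matmuls in total.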
z = result = None
while n > 0:
z = a if z is None else fmatmul(z, z)
n, bit = divmod(n, 2)
if bit:
result = z if result is None else fmatmul(result, z)
return result
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> np.linalg.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
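The solve pattern from the Notes, as a rough sketch (a dedicated
triangular solver would normally be used for the two substitution steps;
plain ``solve`` keeps the example self-contained):
>>> b = np.array([1+0.j, 2+0.j])
>>> y = np.linalg.solve(L, b) # solve L y = b
>>> x = np.linalg.solve(L.T.conj(), y) # solve L.H x = y
>>> np.allclose(np.dot(A, x), b)
True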
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
* 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
* 'complete' : returns q, r with dimensions (M, M), (M, N)
* 'r' : returns r only with dimensions (K, N)
* 'raw' : returns h, tau with dimensions (N, M), (K,)
* 'full' : alias of 'reduced', deprecated
* 'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced', and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = np.linalg.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(np.linalg.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t:
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
>>> va; vb
array([[-0.44721360-0.j , -0.89442719+0.j ],
[ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]])
array([[ 0.89442719+0.j , 0.00000000-0.4472136j],
[ 0.00000000-0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
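# Illustrative sketch (not part of the original module): because the
# eigenvector matrix returned by eigh() is unitary, a Hermitian matrix can be
# rebuilt as v.dot(diag(w)).dot(v.conj().T).  The helper name is hypothetical.
def _demo_eigh_reconstruction():
    import numpy as np
    a = np.array([[1, -2j], [2j, 5]])
    w, v = np.linalg.eigh(a)
    rebuilt = np.dot(v, np.dot(np.diag(w), v.conj().T))
    assert np.allclose(rebuilt, a)
    return rebuilt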
# Singular value decomposition
def svd(a, full_matrices=True, compute_uv=True):
"""
Singular Value Decomposition.
When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh
= (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D
array of `a`'s singular values. When `a` is higher-dimensional, SVD is
applied in stacked mode as explained below.
Parameters
----------
a : (..., M, N) array_like
A real or complex array with ``a.ndim >= 2``.
full_matrices : bool, optional
If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
``(..., N, N)``, respectively. Otherwise, the shapes are
``(..., M, K)`` and ``(..., K, N)``, respectively, where
``K = min(M, N)``.
compute_uv : bool, optional
Whether or not to compute `u` and `vh` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
s : (..., K) array
Vector(s) with the singular values, within each vector sorted in
descending order. The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
vh : { (..., N, N), (..., K, N) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionchanged:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine ``_gesdd``.
SVD is usually described for the factorization of a 2D matrix :math:`A`.
The higher-dimensional case will be discussed below. In the 2D case, SVD is
written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,
:math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
contains the singular values of `a` and `u` and `vh` are unitary. The rows
of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
the eigenvectors of :math:`A A^H`. In both cases the corresponding
(possibly non-zero) eigenvalues are given by ``s**2``.
If `a` has more than two dimensions, then broadcasting rules apply, as
explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
working in "stacked" mode: it iterates over all indices of the first
``a.ndim - 2`` dimensions and for each combination SVD is applied to the
last two indices. The matrix `a` can be reconstructed from the
decomposition with either ``(u * s[..., None, :]) @ vh`` or
``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the
function ``np.matmul`` for python versions below 3.5.)
If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
>>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)
Reconstruction based on full SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((9, 9), (6,), (6, 6))
>>> np.allclose(a, np.dot(u[:, :6] * s, vh))
True
>>> smat = np.zeros((9, 6), dtype=complex)
>>> smat[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on reduced SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((9, 6), (6,), (6, 6))
>>> np.allclose(a, np.dot(u * s, vh))
True
>>> smat = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on full SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))
True
Reconstruction based on reduced SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u, s[..., None] * vh))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m, n = a.shape[-2:]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vh = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vh = vh.astype(result_t, copy=False)
return wrap(u), s, wrap(vh)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
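# Illustrative sketch (not part of the original module): numerically check the
# relationship stated in the svd() Notes -- the squared singular values of a
# real matrix `a` are the eigenvalues of a^T a, and the rows of `vh` are its
# eigenvectors.  The helper name `_demo_svd_eigen_relation` is hypothetical.
def _demo_svd_eigen_relation():
    import numpy as np
    rng = np.random.RandomState(0)
    a = rng.randn(5, 3)
    u, s, vh = np.linalg.svd(a, full_matrices=False)
    ata = np.dot(a.T, a)
    # squared singular values are the eigenvalues of a^T a (sorted to match)
    assert np.allclose(np.sort(np.linalg.eigvalsh(ata))[::-1], s ** 2)
    # each row of vh is an eigenvector of a^T a with eigenvalue s**2
    for s_i, v_i in zip(s, vh):
        assert np.allclose(np.dot(ata, v_i), s_i ** 2 * v_i)
    return True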
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None or p == 2 or p == -2:
s = svd(x, compute_uv=False)
with errstate(all='ignore'):
if p == -2:
r = s[..., -1] / s[..., 0]
else:
r = s[..., 0] / s[..., -1]
else:
# Call inv(x) ignoring errors. The result array will
# contain nans in the entries where inversion failed.
_assertRankAtLeast2(x)
_assertNdSquareness(x)
t, result_t = _commonType(x)
signature = 'D->D' if isComplexType(t) else 'd->d'
with errstate(all='ignore'):
invx = _umath_linalg.inv(x, signature=signature)
r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1))
r = r.astype(result_t, copy=False)
# Convert nans to infs unless the original array had nan entries
r = asarray(r)
nan_mask = isnan(r)
if nan_mask.any():
nan_mask &= ~isnan(x).any(axis=(-2, -1))
if r.ndim > 0:
r[nan_mask] = Inf
elif nan_mask:
r[()] = Inf
# Convention is to return scalars instead of 0d arrays
if r.ndim == 0:
r = r[()]
return r
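# Illustrative sketch (not part of the original module): check the definition
# given in the cond() Notes, i.e. cond(x, p) == norm(x, p) * norm(inv(x), p),
# for a small invertible test matrix.  The helper name is hypothetical.
def _demo_cond_definition():
    import numpy as np
    x = np.array([[1., 0., -1.], [0., 1., 0.], [1., 0., 1.]])
    inv_x = np.linalg.inv(x)
    for p in (1, 2, np.inf, 'fro'):
        lhs = np.linalg.cond(x, p)
        rhs = np.linalg.norm(x, p) * np.linalg.norm(inv_x, p)
        assert np.allclose(lhs, rhs)
    return True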
def matrix_rank(M, tol=None, hermitian=False):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of singular values of the array that are
greater than `tol`.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
M : {(M,), (..., M, N)} array_like
input vector or stack of matrices
tol : (...) array_like, float, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
.. versionchanged:: 1.14
Broadcasted against the stack of matrices
hermitian : bool, optional
If True, `M` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.14
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim < 2:
return int(not all(M==0))
if hermitian:
S = abs(eigvalsh(M))
else:
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
else:
tol = asarray(tol)[..., newaxis]
return count_nonzero(S > tol, axis=-1)
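# Illustrative sketch (not part of the original module): reproduce the default
# threshold described in the matrix_rank() Notes by counting singular values
# above S.max() * max(M.shape) * eps.  The helper name is hypothetical.
def _demo_matrix_rank_threshold():
    import numpy as np
    M = np.eye(4)
    M[-1, -1] = 0.          # make the matrix rank deficient
    S = np.linalg.svd(M, compute_uv=False)
    tol = S.max() * max(M.shape) * np.finfo(S.dtype).eps
    rank_by_hand = int(np.count_nonzero(S > tol))
    assert rank_by_hand == np.linalg.matrix_rank(M) == 3
    return rank_by_hand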
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
a : (..., M, N) array_like
Matrix or stack of matrices to be pseudo-inverted.
rcond : (...) array_like of float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero. Broadcasts against the stack of matrices
Returns
-------
B : (..., N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
    of A's so-called singular values (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
rcond = asarray(rcond)
if _isEmpty2d(a):
m, n = a.shape[-2:]
res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, full_matrices=False)
# discard small singular values
cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
large = s > cutoff
s = divide(1, s, where=large, out=s)
s[~large] = 0
res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
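# Illustrative sketch (not part of the original module): build the
# pseudo-inverse directly from the SVD, as described in the pinv() Notes
# (A+ = V diag(1/s) U^H when all singular values are kept), and compare it
# with the library result.  The helper name is hypothetical.
def _demo_pinv_from_svd():
    import numpy as np
    rng = np.random.RandomState(0)
    a = rng.randn(9, 6)        # generic full-rank matrix, no small singular values
    u, s, vh = np.linalg.svd(a, full_matrices=False)
    a_plus = np.dot(vh.conj().T, np.dot(np.diag(1. / s), u.conj().T))
    assert np.allclose(a_plus, np.linalg.pinv(a))
    return a_plus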
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
sign = sign.astype(result_t, copy=False)
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond="warn"):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
    be under-, well-, or over-determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
.. versionchanged:: 1.14.0
If not set, a FutureWarning is given. The previous default
of ``-1`` will use the machine precision as `rcond` parameter,
the new default will use the machine precision times `max(M, N)`.
To silence the warning and use the new default, use ``rcond=None``,
to keep using the old behavior, use ``rcond=-1``.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(1,), (K,), (0,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
_assertNoEmpty2d(a, b) # TODO: relax this constraint
m, n = a.shape[-2:]
m2, n_rhs = b.shape[-2:]
if m != m2:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
real_t = _linalgRealType(t)
result_real_t = _realType(result_t)
# Determine default rcond value
if rcond == "warn":
# 2017-08-19, 1.14.0
warnings.warn("`rcond` parameter will change to the default of "
"machine precision times ``max(M, N)`` where M and N "
"are the input matrix dimensions.\n"
"To use the future default and silence this warning "
"we advise to pass `rcond=None`, to keep using the old, "
"explicitly pass `rcond=-1`.",
FutureWarning, stacklevel=2)
rcond = -1
if rcond is None:
rcond = finfo(t).eps * max(n, m)
if m <= n:
gufunc = _umath_linalg.lstsq_m
else:
gufunc = _umath_linalg.lstsq_n
signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
# remove the axis we added
if is_1d:
x = x.squeeze(axis=-1)
# we probably should squeeze resids too, but we can't
# without breaking compatibility.
# as documented
if rank != n or m <= n:
resids = array([], result_real_t)
# coerce output arrays
s = s.astype(result_real_t, copy=False)
resids = resids.astype(result_real_t, copy=False)
x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed
return wrap(x), wrap(resids), rank, s
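# Illustrative sketch (not part of the original module): for the full-rank,
# over-determined line fit used in the lstsq() example, the least-squares
# solution coincides with pinv(A).dot(b).  The helper name is hypothetical.
def _demo_lstsq_vs_pinv():
    import numpy as np
    x = np.array([0., 1., 2., 3.])
    y = np.array([-1., 0.2, 0.9, 2.1])
    A = np.vstack([x, np.ones(len(x))]).T
    sol_lstsq = np.linalg.lstsq(A, y, rcond=-1)[0]   # rcond=-1: old, warning-free default
    sol_pinv = np.dot(np.linalg.pinv(A), y)
    assert np.allclose(sol_lstsq, sol_pinv)
    return sol_lstsq            # approximately [1.0, -0.95]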
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
y = moveaxis(x, (row_axis, col_axis), (-2, -1))
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
absx = abs(x)
absx **= ord
ret = add.reduce(absx, axis=axis, keepdims=keepdims)
ret **= (1 / ord)
return ret
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
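# Illustrative sketch (not part of the original module): spot-check two rows of
# the table in the norm() Notes -- for matrices, ord=1 is the maximum absolute
# column sum and ord=inf the maximum absolute row sum.  Helper name hypothetical.
def _demo_norm_table():
    import numpy as np
    b = (np.arange(9) - 4).reshape(3, 3)
    assert np.linalg.norm(b, 1) == np.abs(b).sum(axis=0).max() == 7.0
    assert np.linalg.norm(b, np.inf) == np.abs(b).sum(axis=1).max() == 9.0
    return True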
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
    >>> A = np.random.random((10000, 100))
    >>> B = np.random.random((100, 1000))
    >>> C = np.random.random((1000, 5))
    >>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Notes
-----
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B):
return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
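# Illustrative sketch (not part of the original module): reproduce the cost
# comparison from the multi_dot() Notes for A_{10x100}, B_{100x5}, C_{5x50}.
# The helper names `cost` and `_demo_multi_dot_cost` are hypothetical.
def _demo_multi_dot_cost():
    import numpy as np
    def cost(A, B):
        return A.shape[0] * A.shape[1] * B.shape[1]
    A = np.empty((10, 100))
    B = np.empty((100, 5))
    C = np.empty((5, 50))
    AB = np.empty((10, 5))      # shape of the product A.dot(B)
    BC = np.empty((100, 50))    # shape of the product B.dot(C)
    cost_AB_C = cost(A, B) + cost(AB, C)   # (AB)C: 10*100*5 + 10*5*50 = 7500
    cost_A_BC = cost(B, C) + cost(A, BC)   # A(BC): 100*5*50 + 10*100*50 = 75000
    assert (cost_AB_C, cost_A_BC) == (7500, 75000)
    return cost_AB_C, cost_A_BC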
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
    than `_multi_dot_matrix_chain_order`.
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return an np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| mit |
theakholic/ThinkStats2 | code/chap12soln.py | 68 | 4459 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import thinkplot
import thinkstats2
import regression
import timeseries
def RunQuadraticModel(daily):
    """Runs a quadratic model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
daily['years2'] = daily.years**2
model = smf.ols('ppg ~ years + years2', data=daily)
results = model.fit()
return model, results
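# Illustrative sketch (not part of the original script): RunQuadraticModel
# expects a DataFrame with `ppg` and `years` columns; this shows the same
# "ppg ~ years + years2" fit on synthetic data.  The helper name and the
# fake data are hypothetical.
def DemoQuadraticModelOnFakeData():
    import numpy as np
    import pandas
    years = np.linspace(0, 4, 200)
    ppg = 12.0 - 2.0 * years + 0.4 * years**2 + np.random.normal(0, 0.5, len(years))
    fake_daily = pandas.DataFrame(dict(ppg=ppg, years=years))
    model, results = RunQuadraticModel(fake_daily)
    return results.params        # estimates of Intercept, years, years2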
def PlotQuadraticModel(daily, name):
    """Plots fitted values, residuals, and predictions for the quadratic model.
    daily: DataFrame of daily prices
    name: string used to label the plots
    """
model, results = RunQuadraticModel(daily)
regression.SummarizeResults(results)
timeseries.PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries11',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)')
timeseries.PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries12',
title='residuals',
xlabel='years',
ylabel='price per gram ($)')
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
timeseries.PlotPredictions(daily, years, func=RunQuadraticModel)
thinkplot.Save(root='timeseries13',
title='predictions',
xlabel='years',
xlim=[years[0]-0.1, years[-1]+0.1],
ylabel='price per gram ($)')
def PlotEwmaPredictions(daily, name):
    """Plots EWMA-based predictions of daily prices.
    daily: DataFrame of daily prices
    name: string used to label the plot
    """
# use EWMA to estimate slopes
filled = timeseries.FillMissing(daily)
filled['slope'] = pandas.ewma(filled.ppg.diff(), span=180)
filled[-1:]
# extract the last inter and slope
start = filled.index[-1]
inter = filled.ewma[-1]
slope = filled.slope[-1]
# reindex the DataFrame, adding a year to the end
dates = pandas.date_range(filled.index.min(),
filled.index.max() + np.timedelta64(365, 'D'))
predicted = filled.reindex(dates)
# generate predicted values and add them to the end
predicted['date'] = predicted.index
one_day = np.timedelta64(1, 'D')
predicted['days'] = (predicted.date - start) / one_day
predict = inter + slope * predicted.days
predicted.ewma.fillna(predict, inplace=True)
# plot the actual values and predictions
thinkplot.Scatter(daily.ppg, alpha=0.1, label=name)
thinkplot.Plot(predicted.ewma)
thinkplot.Save()
class SerialCorrelationTest(thinkstats2.HypothesisTest):
"""Tests serial correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
series, lag = data
test_stat = abs(thinkstats2.SerialCorr(series, lag))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
series, lag = self.data
permutation = series.reindex(np.random.permutation(series.index))
return permutation, lag
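# Illustrative sketch (not part of the original script): the test statistic
# above is the absolute lag-1 serial correlation.  This computes the same kind
# of quantity with plain numpy on a synthetic series; the helper name is
# hypothetical and thinkstats2.SerialCorr may differ in edge-case handling.
def DemoSerialCorrByHand(lag=1):
    import numpy as np
    series = np.cumsum(np.random.normal(size=500))   # a random walk
    corr = np.corrcoef(series[:-lag], series[lag:])[0, 1]
    return abs(corr)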
def TestSerialCorr(daily):
"""Tests serial correlations in daily prices and their residuals.
daily: DataFrame of daily prices
"""
# test the correlation between consecutive prices
series = daily.ppg
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the linear model
_, results = timeseries.RunLinearModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the quadratic model
_, results = RunQuadraticModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
def main(name):
transactions = timeseries.ReadData()
dailies = timeseries.GroupByQualityAndDay(transactions)
name = 'high'
daily = dailies[name]
PlotQuadraticModel(daily, name)
TestSerialCorr(daily)
PlotEwmaPredictions(daily, name)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/numpy/lib/polynomial.py | 82 | 37957 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
    >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0, roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
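# Illustrative sketch (not part of the original module): round-trip a set of
# zeros through poly() and roots(); up to ordering and rounding the original
# roots come back.  The helper name is hypothetical.
def _demo_poly_roots_roundtrip():
    import numpy as np
    zeros = np.array([-0.5, 0.0, 0.5])
    c = np.poly(zeros)                     # array([ 1., 0., -0.25, 0.])
    recovered = np.sort(np.roots(c))
    assert np.allclose(recovered, np.sort(zeros))
    return c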
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
weights to apply to the y-coordinates of the sample points.
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond :
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
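# Illustrative sketch (not part of the original module): the polyfit() Notes
# suggest replacing x by x - x.mean() when the sample interval is badly
# centered.  This compares the condition number of the Vandermonde matrix
# before and after centering.  The helper name is hypothetical.
def _demo_polyfit_centering():
    import numpy as np
    x = np.linspace(1000., 1010., 50)          # badly centered sample points
    cond_raw = np.linalg.cond(np.vander(x, 4))
    cond_centered = np.linalg.cond(np.vander(x - x.mean(), 4))
    assert cond_centered < cond_raw
    return cond_raw, cond_centered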
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print(p1)
1 x + 2
>>> print(p2)
2
9 x + 5 x + 4
>>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print(p1)
2
1 x + 2 x + 3
>>> print(p2)
2
9 x + 5 x + 1
>>> print(np.polymul(p1, p2))
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
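# Illustrative sketch (not part of the original module): polydiv returns (q, r)
# such that, up to floating-point error, u == polyadd(polymul(q, v), r). The
# helper below only demonstrates that identity; its name is hypothetical.
def _polydiv_roundtrip_demo():
    """Check the quotient/remainder identity on a small example."""
    u = [3.0, 5.0, 2.0]   # 3x**2 + 5x + 2
    v = [2.0, 1.0]        # 2x + 1
    q, r = polydiv(u, v)
    # Rebuild the dividend from quotient, divisor and remainder.
    reconstructed = polyadd(polymul(q, v), r)
    assert NX.allclose(reconstructed, u)
    return q, r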
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
"""Reformat '**power' markers in a polynomial string so that the exponents
are raised onto the line above their terms; helper for poly1d.__str__."""
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
__hash__ = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
return not self.__eq__(other)
def __setattr__(self, key, val):
raise ValueError("Attributes cannot be changed this way.")
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c', 'coef', 'coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError(
"'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
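# Illustrative sketch (not part of the original module): differentiating the
# antiderivative of a poly1d recovers the original polynomial, since integ()
# only introduces integration constants. The helper name is hypothetical.
def _poly1d_calculus_demo():
    """Round-trip a polynomial through integ() and deriv()."""
    p = poly1d([1, 2, 3])             # x**2 + 2x + 3
    restored = p.integ(k=5).deriv()   # d/dx of the antiderivative; the constant k drops out
    assert NX.allclose(restored.coeffs, p.coeffs)
    return restored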
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
| artistic-2.0 |
rrrrrr8/vnpy | vnpy/trader/app/ctaStrategy/ctaBacktesting.py | 1 | 56364 | # encoding: UTF-8
'''
This module contains the backtesting engine of the CTA app. Its API matches
the live CTA engine, so the same strategy code can be used both for
backtesting and for live trading.
'''
from __future__ import division
from __future__ import print_function
from datetime import datetime, timedelta
from collections import OrderedDict
from itertools import product
import multiprocessing
import copy
import pymongo
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from vnpy.rpc import RpcClient, RpcServer, RemoteException
# 如果安装了seaborn则设置为白色风格
try:
import seaborn as sns
sns.set_style('whitegrid')
except ImportError:
pass
from vnpy.trader.vtGlobal import globalSetting
from vnpy.trader.vtObject import VtTickData, VtBarData
from vnpy.trader.vtConstant import *
from vnpy.trader.vtGateway import VtOrderData, VtTradeData
from .ctaBase import *
########################################################################
class BacktestingEngine(object):
"""
CTA回测引擎
函数接口和策略引擎保持一样,
从而实现同一套代码从回测到实盘。
"""
TICK_MODE = 'tick'
BAR_MODE = 'bar'
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
# 本地停止单
self.stopOrderCount = 0 # 编号计数:stopOrderID = STOPORDERPREFIX + str(stopOrderCount)
# 本地停止单字典, key为stopOrderID,value为stopOrder对象
self.stopOrderDict = {} # 停止单撤销后不会从本字典中删除
self.workingStopOrderDict = {} # 停止单撤销后会从本字典中删除
self.engineType = ENGINETYPE_BACKTESTING # 引擎类型为回测
self.strategy = None # 回测策略
self.mode = self.BAR_MODE # 回测模式,默认为K线
self.startDate = ''
self.initDays = 0
self.endDate = ''
self.capital = 1000000 # 回测时的起始本金(默认100万)
self.slippage = 0 # 回测时假设的滑点
self.rate = 0 # 回测时假设的佣金比例(适用于百分比佣金)
self.size = 1 # 合约大小,默认为1
self.priceTick = 0 # 价格最小变动
self.dbClient = None # 数据库客户端
self.dbCursor = None # 数据库指针
self.hdsClient = None # 历史数据服务器客户端
self.initData = [] # 初始化用的数据
self.dbName = '' # 回测数据库名
self.symbol = '' # 回测集合名
self.dataStartDate = None # 回测数据开始日期,datetime对象
self.dataEndDate = None # 回测数据结束日期,datetime对象
self.strategyStartDate = None # 策略启动日期(即前面的数据用于初始化),datetime对象
self.limitOrderCount = 0 # 限价单编号
self.limitOrderDict = OrderedDict() # 限价单字典
self.workingLimitOrderDict = OrderedDict() # 活动限价单字典,用于进行撮合用
self.tradeCount = 0 # 成交编号
self.tradeDict = OrderedDict() # 成交字典
self.logList = [] # 日志记录
# 当前最新数据,用于模拟成交用
self.tick = None
self.bar = None
self.dt = None # 最新的时间
# 日线回测结果计算用
self.dailyResultDict = OrderedDict()
#------------------------------------------------
# 通用功能
#------------------------------------------------
#----------------------------------------------------------------------
def roundToPriceTick(self, price):
"""取整价格到合约最小价格变动"""
if not self.priceTick:
return price
newPrice = round(price/self.priceTick, 0) * self.priceTick
return newPrice
#----------------------------------------------------------------------
def output(self, content):
"""输出内容"""
print(str(datetime.now()) + "\t" + content)
#------------------------------------------------
# 参数设置相关
#------------------------------------------------
#----------------------------------------------------------------------
def setStartDate(self, startDate='20100416', initDays=10):
"""设置回测的启动日期"""
self.startDate = startDate
self.initDays = initDays
self.dataStartDate = datetime.strptime(startDate, '%Y%m%d')
initTimeDelta = timedelta(initDays)
self.strategyStartDate = self.dataStartDate + initTimeDelta
#----------------------------------------------------------------------
def setEndDate(self, endDate=''):
"""设置回测的结束日期"""
self.endDate = endDate
if endDate:
self.dataEndDate = datetime.strptime(endDate, '%Y%m%d')
# 若不修改时间则会导致不包含dataEndDate当天数据
self.dataEndDate = self.dataEndDate.replace(hour=23, minute=59)
#----------------------------------------------------------------------
def setBacktestingMode(self, mode):
"""设置回测模式"""
self.mode = mode
#----------------------------------------------------------------------
def setDatabase(self, dbName, symbol):
"""设置历史数据所用的数据库"""
self.dbName = dbName
self.symbol = symbol
#----------------------------------------------------------------------
def setCapital(self, capital):
"""设置资本金"""
self.capital = capital
#----------------------------------------------------------------------
def setSlippage(self, slippage):
"""设置滑点点数"""
self.slippage = slippage
#----------------------------------------------------------------------
def setSize(self, size):
"""设置合约大小"""
self.size = size
#----------------------------------------------------------------------
def setRate(self, rate):
"""设置佣金比例"""
self.rate = rate
#----------------------------------------------------------------------
def setPriceTick(self, priceTick):
"""设置价格最小变动"""
self.priceTick = priceTick
#------------------------------------------------
# 数据回放相关
#------------------------------------------------
#----------------------------------------------------------------------
def initHdsClient(self):
"""初始化历史数据服务器客户端"""
reqAddress = 'tcp://localhost:5555'
subAddress = 'tcp://localhost:7777'
self.hdsClient = RpcClient(reqAddress, subAddress)
self.hdsClient.start()
#----------------------------------------------------------------------
def loadHistoryData(self):
"""载入历史数据"""
self.dbClient = pymongo.MongoClient(globalSetting['mongoHost'], globalSetting['mongoPort'])
collection = self.dbClient[self.dbName][self.symbol]
self.output(u'开始载入数据')
# 首先根据回测模式,确认要使用的数据类
if self.mode == self.BAR_MODE:
dataClass = VtBarData
func = self.newBar
else:
dataClass = VtTickData
func = self.newTick
# 载入初始化需要用的数据
if self.hdsClient:
initCursor = self.hdsClient.loadHistoryData(self.dbName,
self.symbol,
self.dataStartDate,
self.strategyStartDate)
else:
flt = {'datetime':{'$gte':self.dataStartDate,
'$lt':self.strategyStartDate}}
initCursor = collection.find(flt).sort('datetime')
# 将数据从查询指针中读取出,并生成列表
self.initData = [] # 清空initData列表
for d in initCursor:
data = dataClass()
data.__dict__ = d
self.initData.append(data)
# 载入回测数据
if self.hdsClient:
self.dbCursor = self.hdsClient.loadHistoryData(self.dbName,
self.symbol,
self.strategyStartDate,
self.dataEndDate)
else:
if not self.dataEndDate:
flt = {'datetime':{'$gte':self.strategyStartDate}} # 数据过滤条件
else:
flt = {'datetime':{'$gte':self.strategyStartDate,
'$lte':self.dataEndDate}}
self.dbCursor = collection.find(flt).sort('datetime')
if isinstance(self.dbCursor, list):
count = len(initCursor) + len(self.dbCursor)
else:
count = initCursor.count() + self.dbCursor.count()
self.output(u'载入完成,数据量:%s' %count)
#----------------------------------------------------------------------
def runBacktesting(self):
"""运行回测"""
# 载入历史数据
self.loadHistoryData()
# 首先根据回测模式,确认要使用的数据类
if self.mode == self.BAR_MODE:
dataClass = VtBarData
func = self.newBar
else:
dataClass = VtTickData
func = self.newTick
self.output(u'开始回测')
self.strategy.onInit()
self.strategy.inited = True
self.output(u'策略初始化完成')
self.strategy.trading = True
self.strategy.onStart()
self.output(u'策略启动完成')
self.output(u'开始回放数据')
for d in self.dbCursor:
data = dataClass()
data.__dict__ = d
func(data)
self.output(u'数据回放结束')
#----------------------------------------------------------------------
def newBar(self, bar):
"""新的K线"""
self.bar = bar
self.dt = bar.datetime
self.crossLimitOrder() # 先撮合限价单
self.crossStopOrder() # 再撮合停止单
self.strategy.onBar(bar) # 推送K线到策略中
self.updateDailyClose(bar.datetime, bar.close)
#----------------------------------------------------------------------
def newTick(self, tick):
"""新的Tick"""
self.tick = tick
self.dt = tick.datetime
self.crossLimitOrder()
self.crossStopOrder()
self.strategy.onTick(tick)
self.updateDailyClose(tick.datetime, tick.lastPrice)
#----------------------------------------------------------------------
def initStrategy(self, strategyClass, setting=None):
"""
初始化策略
setting是策略的参数设置,如果使用类中写好的默认设置则可以不传该参数
"""
self.strategy = strategyClass(self, setting)
self.strategy.name = self.strategy.className
#----------------------------------------------------------------------
def crossLimitOrder(self):
"""基于最新数据撮合限价单"""
# 先确定会撮合成交的价格
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.low # 若买入方向限价单价格高于该价格,则会成交
sellCrossPrice = self.bar.high # 若卖出方向限价单价格低于该价格,则会成交
buyBestCrossPrice = self.bar.open # 在当前时间点前发出的买入委托可能的最优成交价
sellBestCrossPrice = self.bar.open # 在当前时间点前发出的卖出委托可能的最优成交价
else:
buyCrossPrice = self.tick.askPrice1
sellCrossPrice = self.tick.bidPrice1
buyBestCrossPrice = self.tick.askPrice1
sellBestCrossPrice = self.tick.bidPrice1
# Iterate over all working limit orders; take a snapshot with list() because
# filled orders are deleted from the dict inside the loop
for orderID, order in list(self.workingLimitOrderDict.items()):
# 推送委托进入队列(未成交)的状态更新
if not order.status:
order.status = STATUS_NOTTRADED
self.strategy.onOrder(order)
# 判断是否会成交
buyCross = (order.direction==DIRECTION_LONG and
order.price>=buyCrossPrice and
buyCrossPrice > 0) # 国内的tick行情在涨停时askPrice1为0,此时买无法成交
sellCross = (order.direction==DIRECTION_SHORT and
order.price<=sellCrossPrice and
sellCrossPrice > 0) # 国内的tick行情在跌停时bidPrice1为0,此时卖无法成交
# 如果发生了成交
if buyCross or sellCross:
# 推送成交数据
self.tradeCount += 1 # 成交编号自增1
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = order.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
trade.orderID = order.orderID
trade.vtOrderID = order.orderID
trade.direction = order.direction
trade.offset = order.offset
# Take a buy order as an example:
# 1. Suppose the current bar's OHLC is 100, 125, 90, 110.
# 2. Suppose the strategy sent a limit order at 105 when the previous bar
#    closed (i.e. when the current bar opened).
# 3. The realistic fill price is then 100 rather than 105, because 100 was
#    the best price available in the market when the order was sent.
if buyCross:
trade.price = min(order.price, buyBestCrossPrice)
self.strategy.pos += order.totalVolume
else:
trade.price = max(order.price, sellBestCrossPrice)
self.strategy.pos -= order.totalVolume
trade.volume = order.totalVolume
trade.tradeTime = self.dt.strftime('%H:%M:%S')
trade.dt = self.dt
self.strategy.onTrade(trade)
self.tradeDict[tradeID] = trade
# 推送委托数据
order.tradedVolume = order.totalVolume
order.status = STATUS_ALLTRADED
self.strategy.onOrder(order)
# 从字典中删除该限价单
if orderID in self.workingLimitOrderDict:
del self.workingLimitOrderDict[orderID]
#----------------------------------------------------------------------
def crossStopOrder(self):
"""基于最新数据撮合停止单"""
# 先确定会撮合成交的价格,这里和限价单规则相反
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.high # 若买入方向停止单价格低于该价格,则会成交
sellCrossPrice = self.bar.low # 若卖出方向限价单价格高于该价格,则会成交
bestCrossPrice = self.bar.open # 最优成交价,买入停止单不能低于,卖出停止单不能高于
else:
buyCrossPrice = self.tick.lastPrice
sellCrossPrice = self.tick.lastPrice
bestCrossPrice = self.tick.lastPrice
# Iterate over all working stop orders; take a snapshot with list() because
# triggered orders are deleted from the dict inside the loop
for stopOrderID, so in list(self.workingStopOrderDict.items()):
# 判断是否会成交
buyCross = so.direction==DIRECTION_LONG and so.price<=buyCrossPrice
sellCross = so.direction==DIRECTION_SHORT and so.price>=sellCrossPrice
# 如果发生了成交
if buyCross or sellCross:
# 更新停止单状态,并从字典中删除该停止单
so.status = STOPORDER_TRIGGERED
if stopOrderID in self.workingStopOrderDict:
del self.workingStopOrderDict[stopOrderID]
# 推送成交数据
self.tradeCount += 1 # 成交编号自增1
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = so.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
if buyCross:
self.strategy.pos += so.volume
trade.price = max(bestCrossPrice, so.price)
else:
self.strategy.pos -= so.volume
trade.price = min(bestCrossPrice, so.price)
self.limitOrderCount += 1
orderID = str(self.limitOrderCount)
trade.orderID = orderID
trade.vtOrderID = orderID
trade.direction = so.direction
trade.offset = so.offset
trade.volume = so.volume
trade.tradeTime = self.dt.strftime('%H:%M:%S')
trade.dt = self.dt
self.tradeDict[tradeID] = trade
# 推送委托数据
order = VtOrderData()
order.vtSymbol = so.vtSymbol
order.symbol = so.vtSymbol
order.orderID = orderID
order.vtOrderID = orderID
order.direction = so.direction
order.offset = so.offset
order.price = so.price
order.totalVolume = so.volume
order.tradedVolume = so.volume
order.status = STATUS_ALLTRADED
order.orderTime = trade.tradeTime
self.limitOrderDict[orderID] = order
# 按照顺序推送数据
self.strategy.onStopOrder(so)
self.strategy.onOrder(order)
self.strategy.onTrade(trade)
#------------------------------------------------
# 策略接口相关
#------------------------------------------------
#----------------------------------------------------------------------
def sendOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发单"""
self.limitOrderCount += 1
orderID = str(self.limitOrderCount)
order = VtOrderData()
order.vtSymbol = vtSymbol
order.price = self.roundToPriceTick(price)
order.totalVolume = volume
order.orderID = orderID
order.vtOrderID = orderID
order.orderTime = self.dt.strftime('%H:%M:%S')
# CTA委托类型映射
if orderType == CTAORDER_BUY:
order.direction = DIRECTION_LONG
order.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
order.direction = DIRECTION_SHORT
order.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
order.direction = DIRECTION_SHORT
order.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
order.direction = DIRECTION_LONG
order.offset = OFFSET_CLOSE
# 保存到限价单字典中
self.workingLimitOrderDict[orderID] = order
self.limitOrderDict[orderID] = order
return [orderID]
#----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
"""撤单"""
if vtOrderID in self.workingLimitOrderDict:
order = self.workingLimitOrderDict[vtOrderID]
order.status = STATUS_CANCELLED
order.cancelTime = self.dt.strftime('%H:%M:%S')
self.strategy.onOrder(order)
del self.workingLimitOrderDict[vtOrderID]
#----------------------------------------------------------------------
def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发停止单(本地实现)"""
self.stopOrderCount += 1
stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)
so = StopOrder()
so.vtSymbol = vtSymbol
so.price = self.roundToPriceTick(price)
so.volume = volume
so.strategy = strategy
so.status = STOPORDER_WAITING
so.stopOrderID = stopOrderID
if orderType == CTAORDER_BUY:
so.direction = DIRECTION_LONG
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
so.direction = DIRECTION_LONG
so.offset = OFFSET_CLOSE
# 保存stopOrder对象到字典中
self.stopOrderDict[stopOrderID] = so
self.workingStopOrderDict[stopOrderID] = so
# 推送停止单初始更新
self.strategy.onStopOrder(so)
return [stopOrderID]
#----------------------------------------------------------------------
def cancelStopOrder(self, stopOrderID):
"""撤销停止单"""
# 检查停止单是否存在
if stopOrderID in self.workingStopOrderDict:
so = self.workingStopOrderDict[stopOrderID]
so.status = STOPORDER_CANCELLED
del self.workingStopOrderDict[stopOrderID]
self.strategy.onStopOrder(so)
#----------------------------------------------------------------------
def putStrategyEvent(self, name):
"""发送策略更新事件,回测中忽略"""
pass
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
"""考虑到回测中不允许向数据库插入数据,防止实盘交易中的一些代码出错"""
pass
#----------------------------------------------------------------------
def loadBar(self, dbName, collectionName, startDate):
"""直接返回初始化数据列表中的Bar"""
return self.initData
#----------------------------------------------------------------------
def loadTick(self, dbName, collectionName, startDate):
"""直接返回初始化数据列表中的Tick"""
return self.initData
#----------------------------------------------------------------------
def writeCtaLog(self, content):
"""记录日志"""
log = str(self.dt) + ' ' + content
self.logList.append(log)
#----------------------------------------------------------------------
def cancelAll(self, name):
"""全部撤单"""
# Cancel all working limit orders; iterate over a copy of the keys because
# cancelOrder removes entries from the dict
for orderID in list(self.workingLimitOrderDict.keys()):
self.cancelOrder(orderID)
# Cancel all working stop orders
for stopOrderID in list(self.workingStopOrderDict.keys()):
self.cancelStopOrder(stopOrderID)
#----------------------------------------------------------------------
def saveSyncData(self, strategy):
"""保存同步数据(无效)"""
pass
#----------------------------------------------------------------------
def getPriceTick(self, strategy):
"""获取最小价格变动"""
return self.priceTick
#------------------------------------------------
# 结果计算相关
#------------------------------------------------
#----------------------------------------------------------------------
def calculateBacktestingResult(self):
"""
计算回测结果
"""
self.output(u'计算回测结果')
# 检查成交记录
if not self.tradeDict:
self.output(u'成交记录为空,无法计算回测结果')
return {}
# 首先基于回测后的成交记录,计算每笔交易的盈亏
resultList = [] # 交易结果列表
longTrade = [] # 未平仓的多头交易
shortTrade = [] # 未平仓的空头交易
tradeTimeList = [] # 每笔成交时间戳
posList = [0] # 每笔成交后的持仓情况
for trade in self.tradeDict.values():
# 复制成交对象,因为下面的开平仓交易配对涉及到对成交数量的修改
# 若不进行复制直接操作,则计算完后所有成交的数量会变成0
trade = copy.copy(trade)
# 多头交易
if trade.direction == DIRECTION_LONG:
# 如果尚无空头交易
if not shortTrade:
longTrade.append(trade)
# 当前多头交易为平空
else:
while True:
entryTrade = shortTrade[0]
exitTrade = trade
# 清算开平仓交易
closedVolume = min(exitTrade.volume, entryTrade.volume)
result = TradingResult(entryTrade.price, entryTrade.dt,
exitTrade.price, exitTrade.dt,
-closedVolume, self.rate, self.slippage, self.size)
resultList.append(result)
posList.extend([-1,0])
tradeTimeList.extend([result.entryDt, result.exitDt])
# 计算未清算部分
entryTrade.volume -= closedVolume
exitTrade.volume -= closedVolume
# 如果开仓交易已经全部清算,则从列表中移除
if not entryTrade.volume:
shortTrade.pop(0)
# 如果平仓交易已经全部清算,则退出循环
if not exitTrade.volume:
break
# 如果平仓交易未全部清算,
if exitTrade.volume:
# 且开仓交易已经全部清算完,则平仓交易剩余的部分
# 等于新的反向开仓交易,添加到队列中
if not shortTrade:
longTrade.append(exitTrade)
break
# 如果开仓交易还有剩余,则进入下一轮循环
else:
pass
# 空头交易
else:
# 如果尚无多头交易
if not longTrade:
shortTrade.append(trade)
# 当前空头交易为平多
else:
while True:
entryTrade = longTrade[0]
exitTrade = trade
# 清算开平仓交易
closedVolume = min(exitTrade.volume, entryTrade.volume)
result = TradingResult(entryTrade.price, entryTrade.dt,
exitTrade.price, exitTrade.dt,
closedVolume, self.rate, self.slippage, self.size)
resultList.append(result)
posList.extend([1,0])
tradeTimeList.extend([result.entryDt, result.exitDt])
# 计算未清算部分
entryTrade.volume -= closedVolume
exitTrade.volume -= closedVolume
# 如果开仓交易已经全部清算,则从列表中移除
if not entryTrade.volume:
longTrade.pop(0)
# 如果平仓交易已经全部清算,则退出循环
if not exitTrade.volume:
break
# 如果平仓交易未全部清算,
if exitTrade.volume:
# 且开仓交易已经全部清算完,则平仓交易剩余的部分
# 等于新的反向开仓交易,添加到队列中
if not longTrade:
shortTrade.append(exitTrade)
break
# 如果开仓交易还有剩余,则进入下一轮循环
else:
pass
# 到最后交易日尚未平仓的交易,则以最后价格平仓
if self.mode == self.BAR_MODE:
endPrice = self.bar.close
else:
endPrice = self.tick.lastPrice
for trade in longTrade:
result = TradingResult(trade.price, trade.dt, endPrice, self.dt,
trade.volume, self.rate, self.slippage, self.size)
resultList.append(result)
for trade in shortTrade:
result = TradingResult(trade.price, trade.dt, endPrice, self.dt,
-trade.volume, self.rate, self.slippage, self.size)
resultList.append(result)
# 检查是否有交易
if not resultList:
self.output(u'无交易结果')
return {}
# 然后基于每笔交易的结果,我们可以计算具体的盈亏曲线和最大回撤等
capital = 0 # 资金
maxCapital = 0 # 资金最高净值
drawdown = 0 # 回撤
totalResult = 0 # 总成交数量
totalTurnover = 0 # 总成交金额(合约面值)
totalCommission = 0 # 总手续费
totalSlippage = 0 # 总滑点
timeList = [] # 时间序列
pnlList = [] # 每笔盈亏序列
capitalList = [] # 盈亏汇总的时间序列
drawdownList = [] # 回撤的时间序列
winningResult = 0 # 盈利次数
losingResult = 0 # 亏损次数
totalWinning = 0 # 总盈利金额
totalLosing = 0 # 总亏损金额
for result in resultList:
capital += result.pnl
maxCapital = max(capital, maxCapital)
drawdown = capital - maxCapital
pnlList.append(result.pnl)
timeList.append(result.exitDt) # 交易的时间戳使用平仓时间
capitalList.append(capital)
drawdownList.append(drawdown)
totalResult += 1
totalTurnover += result.turnover
totalCommission += result.commission
totalSlippage += result.slippage
if result.pnl >= 0:
winningResult += 1
totalWinning += result.pnl
else:
losingResult += 1
totalLosing += result.pnl
# 计算盈亏相关数据
winningRate = winningResult/totalResult*100 # 胜率
averageWinning = 0 # 这里把数据都初始化为0
averageLosing = 0
profitLossRatio = 0
if winningResult:
averageWinning = totalWinning/winningResult # 平均每笔盈利
if losingResult:
averageLosing = totalLosing/losingResult # 平均每笔亏损
if averageLosing:
profitLossRatio = -averageWinning/averageLosing # 盈亏比
# 返回回测结果
d = {}
d['capital'] = capital
d['maxCapital'] = maxCapital
d['drawdown'] = drawdown
d['totalResult'] = totalResult
d['totalTurnover'] = totalTurnover
d['totalCommission'] = totalCommission
d['totalSlippage'] = totalSlippage
d['timeList'] = timeList
d['pnlList'] = pnlList
d['capitalList'] = capitalList
d['drawdownList'] = drawdownList
d['winningRate'] = winningRate
d['averageWinning'] = averageWinning
d['averageLosing'] = averageLosing
d['profitLossRatio'] = profitLossRatio
d['posList'] = posList
d['tradeTimeList'] = tradeTimeList
d['resultList'] = resultList
return d
#----------------------------------------------------------------------
def showBacktestingResult(self):
"""显示回测结果"""
d = self.calculateBacktestingResult()
# 输出
self.output('-' * 30)
self.output(u'第一笔交易:\t%s' % d['timeList'][0])
self.output(u'最后一笔交易:\t%s' % d['timeList'][-1])
self.output(u'总交易次数:\t%s' % formatNumber(d['totalResult']))
self.output(u'总盈亏:\t%s' % formatNumber(d['capital']))
self.output(u'最大回撤: \t%s' % formatNumber(min(d['drawdownList'])))
self.output(u'平均每笔盈利:\t%s' %formatNumber(d['capital']/d['totalResult']))
self.output(u'平均每笔滑点:\t%s' %formatNumber(d['totalSlippage']/d['totalResult']))
self.output(u'平均每笔佣金:\t%s' %formatNumber(d['totalCommission']/d['totalResult']))
self.output(u'胜率\t\t%s%%' %formatNumber(d['winningRate']))
self.output(u'盈利交易平均值\t%s' %formatNumber(d['averageWinning']))
self.output(u'亏损交易平均值\t%s' %formatNumber(d['averageLosing']))
self.output(u'盈亏比:\t%s' %formatNumber(d['profitLossRatio']))
# 绘图
fig = plt.figure(figsize=(10, 16))
pCapital = plt.subplot(4, 1, 1)
pCapital.set_ylabel("capital")
pCapital.plot(d['capitalList'], color='r', lw=0.8)
pDD = plt.subplot(4, 1, 2)
pDD.set_ylabel("DD")
pDD.bar(range(len(d['drawdownList'])), d['drawdownList'], color='g')
pPnl = plt.subplot(4, 1, 3)
pPnl.set_ylabel("pnl")
pPnl.hist(d['pnlList'], bins=50, color='c')
pPos = plt.subplot(4, 1, 4)
pPos.set_ylabel("Position")
if d['posList'][-1] == 0:
del d['posList'][-1]
tradeTimeIndex = [item.strftime("%m/%d %H:%M:%S") for item in d['tradeTimeList']]
xstep = max(int(len(tradeTimeIndex) / 10), 1)  # guard against a zero step when there are few trades
xindex = np.arange(0, len(tradeTimeIndex), xstep)
tradeTimeIndex = [tradeTimeIndex[i] for i in xindex]
pPos.plot(d['posList'], color='k', drawstyle='steps-pre')
pPos.set_ylim(-1.2, 1.2)
plt.sca(pPos)
plt.tight_layout()
plt.xticks(xindex, tradeTimeIndex, rotation=30) # rotate the time labels so they stay readable
plt.show()
#----------------------------------------------------------------------
def clearBacktestingResult(self):
"""清空之前回测的结果"""
# 清空限价单相关
self.limitOrderCount = 0
self.limitOrderDict.clear()
self.workingLimitOrderDict.clear()
# 清空停止单相关
self.stopOrderCount = 0
self.stopOrderDict.clear()
self.workingStopOrderDict.clear()
# 清空成交相关
self.tradeCount = 0
self.tradeDict.clear()
#----------------------------------------------------------------------
def runOptimization(self, strategyClass, optimizationSetting):
"""优化参数"""
# 获取优化设置
settingList = optimizationSetting.generateSetting()
targetName = optimizationSetting.optimizeTarget
# 检查参数设置问题
if not settingList or not targetName:
self.output(u'优化设置有问题,请检查')
# 遍历优化
resultList = []
for setting in settingList:
self.clearBacktestingResult()
self.output('-' * 30)
self.output('setting: %s' %str(setting))
self.initStrategy(strategyClass, setting)
self.runBacktesting()
df = self.calculateDailyResult()
df, d = self.calculateDailyStatistics(df)
try:
targetValue = d[targetName]
except KeyError:
targetValue = 0
resultList.append(([str(setting)], targetValue, d))
# 显示结果
resultList.sort(reverse=True, key=lambda result:result[1])
self.output('-' * 30)
self.output(u'优化结果:')
for result in resultList:
self.output(u'参数:%s,目标:%s' %(result[0], result[1]))
return resultList
#----------------------------------------------------------------------
def runParallelOptimization(self, strategyClass, optimizationSetting):
"""并行优化参数"""
# 获取优化设置
settingList = optimizationSetting.generateSetting()
targetName = optimizationSetting.optimizeTarget
# 检查参数设置问题
if not settingList or not targetName:
self.output(u'优化设置有问题,请检查')
# 多进程优化,启动一个对应CPU核心数量的进程池
pool = multiprocessing.Pool(multiprocessing.cpu_count())
l = []
for setting in settingList:
l.append(pool.apply_async(optimize, (strategyClass, setting,
targetName, self.mode,
self.startDate, self.initDays, self.endDate,
self.slippage, self.rate, self.size, self.priceTick,
self.dbName, self.symbol)))
pool.close()
pool.join()
# 显示结果
resultList = [res.get() for res in l]
resultList.sort(reverse=True, key=lambda result:result[1])
self.output('-' * 30)
self.output(u'优化结果:')
for result in resultList:
self.output(u'参数:%s,目标:%s' %(result[0], result[1]))
return resultList
#----------------------------------------------------------------------
def updateDailyClose(self, dt, price):
"""更新每日收盘价"""
date = dt.date()
if date not in self.dailyResultDict:
self.dailyResultDict[date] = DailyResult(date, price)
else:
self.dailyResultDict[date].closePrice = price
#----------------------------------------------------------------------
def calculateDailyResult(self):
"""计算按日统计的交易结果"""
self.output(u'计算按日统计结果')
# 检查成交记录
if not self.tradeDict:
self.output(u'成交记录为空,无法计算回测结果')
return {}
# 将成交添加到每日交易结果中
for trade in self.tradeDict.values():
date = trade.dt.date()
dailyResult = self.dailyResultDict[date]
dailyResult.addTrade(trade)
# 遍历计算每日结果
previousClose = 0
openPosition = 0
for dailyResult in self.dailyResultDict.values():
dailyResult.previousClose = previousClose
previousClose = dailyResult.closePrice
dailyResult.calculatePnl(openPosition, self.size, self.rate, self.slippage )
openPosition = dailyResult.closePosition
# 生成DataFrame
resultDict = {k:[] for k in dailyResult.__dict__.keys()}
for dailyResult in self.dailyResultDict.values():
for k, v in dailyResult.__dict__.items():
resultDict[k].append(v)
resultDf = pd.DataFrame.from_dict(resultDict)
# 计算衍生数据
resultDf = resultDf.set_index('date')
return resultDf
#----------------------------------------------------------------------
def calculateDailyStatistics(self, df):
"""计算按日统计的结果"""
df['balance'] = df['netPnl'].cumsum() + self.capital
df['return'] = (np.log(df['balance']) - np.log(df['balance'].shift(1))).fillna(0)
df['highlevel'] = df['balance'].rolling(min_periods=1,window=len(df),center=False).max()
df['drawdown'] = df['balance'] - df['highlevel']
df['ddPercent'] = df['drawdown'] / df['highlevel'] * 100
# 计算统计结果
startDate = df.index[0]
endDate = df.index[-1]
totalDays = len(df)
profitDays = len(df[df['netPnl']>0])
lossDays = len(df[df['netPnl']<0])
endBalance = df['balance'].iloc[-1]
maxDrawdown = df['drawdown'].min()
maxDdPercent = df['ddPercent'].min()
totalNetPnl = df['netPnl'].sum()
dailyNetPnl = totalNetPnl / totalDays
totalCommission = df['commission'].sum()
dailyCommission = totalCommission / totalDays
totalSlippage = df['slippage'].sum()
dailySlippage = totalSlippage / totalDays
totalTurnover = df['turnover'].sum()
dailyTurnover = totalTurnover / totalDays
totalTradeCount = df['tradeCount'].sum()
dailyTradeCount = totalTradeCount / totalDays
totalReturn = (endBalance/self.capital - 1) * 100
annualizedReturn = totalReturn / totalDays * 240
dailyReturn = df['return'].mean() * 100
returnStd = df['return'].std() * 100
if returnStd:
sharpeRatio = dailyReturn / returnStd * np.sqrt(240)
else:
sharpeRatio = 0
# 返回结果
result = {
'startDate': startDate,
'endDate': endDate,
'totalDays': totalDays,
'profitDays': profitDays,
'lossDays': lossDays,
'endBalance': endBalance,
'maxDrawdown': maxDrawdown,
'maxDdPercent': maxDdPercent,
'totalNetPnl': totalNetPnl,
'dailyNetPnl': dailyNetPnl,
'totalCommission': totalCommission,
'dailyCommission': dailyCommission,
'totalSlippage': totalSlippage,
'dailySlippage': dailySlippage,
'totalTurnover': totalTurnover,
'dailyTurnover': dailyTurnover,
'totalTradeCount': totalTradeCount,
'dailyTradeCount': dailyTradeCount,
'totalReturn': totalReturn,
'annualizedReturn': annualizedReturn,
'dailyReturn': dailyReturn,
'returnStd': returnStd,
'sharpeRatio': sharpeRatio
}
return df, result
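# Illustrative note (not from the original source): with the 240-trading-day
# convention above, a mean daily log return of 0.1% and a daily standard
# deviation of 1% would give sharpeRatio ~ 0.1 * sqrt(240) ~ 1.55, i.e. the
# Sharpe ratio rescales the daily mean/std ratio by sqrt(240).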
#----------------------------------------------------------------------
def showDailyResult(self, df=None, result=None):
"""显示按日统计的交易结果"""
if df is None:
df = self.calculateDailyResult()
df, result = self.calculateDailyStatistics(df)
# 输出统计结果
self.output('-' * 30)
self.output(u'首个交易日:\t%s' % result['startDate'])
self.output(u'最后交易日:\t%s' % result['endDate'])
self.output(u'总交易日:\t%s' % result['totalDays'])
self.output(u'盈利交易日\t%s' % result['profitDays'])
self.output(u'亏损交易日:\t%s' % result['lossDays'])
self.output(u'起始资金:\t%s' % self.capital)
self.output(u'结束资金:\t%s' % formatNumber(result['endBalance']))
self.output(u'总收益率:\t%s%%' % formatNumber(result['totalReturn']))
self.output(u'年化收益:\t%s%%' % formatNumber(result['annualizedReturn']))
self.output(u'总盈亏:\t%s' % formatNumber(result['totalNetPnl']))
self.output(u'最大回撤: \t%s' % formatNumber(result['maxDrawdown']))
self.output(u'百分比最大回撤: %s%%' % formatNumber(result['maxDdPercent']))
self.output(u'总手续费:\t%s' % formatNumber(result['totalCommission']))
self.output(u'总滑点:\t%s' % formatNumber(result['totalSlippage']))
self.output(u'总成交金额:\t%s' % formatNumber(result['totalTurnover']))
self.output(u'总成交笔数:\t%s' % formatNumber(result['totalTradeCount']))
self.output(u'日均盈亏:\t%s' % formatNumber(result['dailyNetPnl']))
self.output(u'日均手续费:\t%s' % formatNumber(result['dailyCommission']))
self.output(u'日均滑点:\t%s' % formatNumber(result['dailySlippage']))
self.output(u'日均成交金额:\t%s' % formatNumber(result['dailyTurnover']))
self.output(u'日均成交笔数:\t%s' % formatNumber(result['dailyTradeCount']))
self.output(u'日均收益率:\t%s%%' % formatNumber(result['dailyReturn']))
self.output(u'收益标准差:\t%s%%' % formatNumber(result['returnStd']))
self.output(u'Sharpe Ratio:\t%s' % formatNumber(result['sharpeRatio']))
# 绘图
fig = plt.figure(figsize=(10, 16))
pBalance = plt.subplot(4, 1, 1)
pBalance.set_title('Balance')
df['balance'].plot(legend=True)
pDrawdown = plt.subplot(4, 1, 2)
pDrawdown.set_title('Drawdown')
pDrawdown.fill_between(range(len(df)), df['drawdown'].values)
pPnl = plt.subplot(4, 1, 3)
pPnl.set_title('Daily Pnl')
df['netPnl'].plot(kind='bar', legend=False, grid=False, xticks=[])
pKDE = plt.subplot(4, 1, 4)
pKDE.set_title('Daily Pnl Distribution')
df['netPnl'].hist(bins=50)
plt.show()
########################################################################
class TradingResult(object):
"""每笔交易的结果"""
#----------------------------------------------------------------------
def __init__(self, entryPrice, entryDt, exitPrice,
exitDt, volume, rate, slippage, size):
"""Constructor"""
self.entryPrice = entryPrice # 开仓价格
self.exitPrice = exitPrice # 平仓价格
self.entryDt = entryDt # 开仓时间datetime
self.exitDt = exitDt # 平仓时间
self.volume = volume # 交易数量(+/-代表方向)
self.turnover = (self.entryPrice+self.exitPrice)*size*abs(volume) # 成交金额
self.commission = self.turnover*rate # 手续费成本
self.slippage = slippage*2*size*abs(volume) # 滑点成本
self.pnl = ((self.exitPrice - self.entryPrice) * volume * size
- self.commission - self.slippage) # 净盈亏
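# Illustrative example (numbers are hypothetical, not from the original code):
# a long round trip of 1 contract, entry 100, exit 110, size=10, rate=0.0001
# and slippage=0.2 gives
#     turnover   = (100 + 110) * 10 * 1            = 2100
#     commission = 2100 * 0.0001                   = 0.21
#     slippage   = 0.2 * 2 * 10 * 1                = 4
#     pnl        = (110 - 100) * 1 * 10 - 0.21 - 4 = 95.79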
########################################################################
class DailyResult(object):
"""每日交易的结果"""
#----------------------------------------------------------------------
def __init__(self, date, closePrice):
"""Constructor"""
self.date = date # 日期
self.closePrice = closePrice # 当日收盘价
self.previousClose = 0 # 昨日收盘价
self.tradeList = [] # 成交列表
self.tradeCount = 0 # 成交数量
self.openPosition = 0 # 开盘时的持仓
self.closePosition = 0 # 收盘时的持仓
self.tradingPnl = 0 # 交易盈亏
self.positionPnl = 0 # 持仓盈亏
self.totalPnl = 0 # 总盈亏
self.turnover = 0 # 成交量
self.commission = 0 # 手续费
self.slippage = 0 # 滑点
self.netPnl = 0 # 净盈亏
#----------------------------------------------------------------------
def addTrade(self, trade):
"""添加交易"""
self.tradeList.append(trade)
#----------------------------------------------------------------------
def calculatePnl(self, openPosition=0, size=1, rate=0, slippage=0):
"""
计算盈亏
size: 合约乘数
rate:手续费率
slippage:滑点点数
"""
# 持仓部分
self.openPosition = openPosition
self.positionPnl = self.openPosition * (self.closePrice - self.previousClose) * size
self.closePosition = self.openPosition
# 交易部分
self.tradeCount = len(self.tradeList)
for trade in self.tradeList:
if trade.direction == DIRECTION_LONG:
posChange = trade.volume
else:
posChange = -trade.volume
self.tradingPnl += posChange * (self.closePrice - trade.price) * size
self.closePosition += posChange
self.turnover += trade.price * trade.volume * size
self.commission += trade.price * trade.volume * size * rate
self.slippage += trade.volume * size * slippage
# 汇总
self.totalPnl = self.tradingPnl + self.positionPnl
self.netPnl = self.totalPnl - self.commission - self.slippage
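# Illustrative example (hypothetical numbers): starting the day 1 lot long with
# previousClose=100 and closePrice=102, then buying 1 more lot intraday at 101
# with size=10, gives
#     positionPnl = 1 * (102 - 100) * 10 = 20   (overnight position marked to market)
#     tradingPnl  = 1 * (102 - 101) * 10 = 10   (intraday trade marked to close)
#     totalPnl    = 30, from which commission and slippage are then subtracted.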
########################################################################
class OptimizationSetting(object):
"""优化设置"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.paramDict = OrderedDict()
self.optimizeTarget = '' # 优化目标字段
#----------------------------------------------------------------------
def addParameter(self, name, start, end=None, step=None):
"""增加优化参数"""
if end is None and step is None:
self.paramDict[name] = [start]
return
if end < start:
print(u'Parameter start value must not be greater than the end value')
return
if step <= 0:
print(u'Parameter step must be greater than 0')
return
l = []
param = start
while param <= end:
l.append(param)
param += step
self.paramDict[name] = l
#----------------------------------------------------------------------
def generateSetting(self):
"""生成优化参数组合"""
# 参数名的列表
nameList = self.paramDict.keys()
paramList = self.paramDict.values()
# 使用迭代工具生产参数对组合
productList = list(product(*paramList))
# 把参数对组合打包到一个个字典组成的列表中
settingList = []
for p in productList:
d = dict(zip(nameList, p))
settingList.append(d)
return settingList
#----------------------------------------------------------------------
def setOptimizeTarget(self, target):
"""设置优化目标字段"""
self.optimizeTarget = target
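# Minimal usage sketch (hypothetical parameter names, not part of the original):
#     setting = OptimizationSetting()
#     setting.setOptimizeTarget('sharpeRatio')        # a key of the calculateDailyStatistics result
#     setting.addParameter('fastWindow', 5, 20, 5)    # expands to [5, 10, 15, 20]
#     setting.addParameter('slowWindow', 30)          # fixed value
#     # generateSetting() then yields the cross product, e.g.
#     # [{'fastWindow': 5, 'slowWindow': 30}, {'fastWindow': 10, 'slowWindow': 30}, ...]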
########################################################################
class HistoryDataServer(RpcServer):
"""历史数据缓存服务器"""
#----------------------------------------------------------------------
def __init__(self, repAddress, pubAddress):
"""Constructor"""
super(HistoryDataServer, self).__init__(repAddress, pubAddress)
self.dbClient = pymongo.MongoClient(globalSetting['mongoHost'],
globalSetting['mongoPort'])
self.historyDict = {}
self.register(self.loadHistoryData)
#----------------------------------------------------------------------
def loadHistoryData(self, dbName, symbol, start, end):
""""""
# 首先检查是否有缓存,如果有则直接返回
history = self.historyDict.get((dbName, symbol, start, end), None)
if history:
print(u'找到内存缓存:%s %s %s %s' %(dbName, symbol, start, end))
return history
# 否则从数据库加载
collection = self.dbClient[dbName][symbol]
if end:
flt = {'datetime':{'$gte':start, '$lt':end}}
else:
flt = {'datetime':{'$gte':start}}
cx = collection.find(flt).sort('datetime')
history = [d for d in cx]
self.historyDict[(dbName, symbol, start, end)] = history
print(u'从数据库加载:%s %s %s %s' %(dbName, symbol, start, end))
return history
#----------------------------------------------------------------------
def runHistoryDataServer():
""""""
repAddress = 'tcp://*:5555'
pubAddress = 'tcp://*:7777'
hds = HistoryDataServer(repAddress, pubAddress)
hds.start()
print(u'Press Enter to exit')
hds.stop()
raw_input() # Python 2 builtin; under Python 3 this would be input()
#----------------------------------------------------------------------
def formatNumber(n):
"""格式化数字到字符串"""
rn = round(n, 2) # 保留两位小数
return format(rn, ',') # 加上千分符
#----------------------------------------------------------------------
def optimize(strategyClass, setting, targetName,
mode, startDate, initDays, endDate,
slippage, rate, size, priceTick,
dbName, symbol):
"""多进程优化时跑在每个进程中运行的函数"""
engine = BacktestingEngine()
engine.setBacktestingMode(mode)
engine.setStartDate(startDate, initDays)
engine.setEndDate(endDate)
engine.setSlippage(slippage)
engine.setRate(rate)
engine.setSize(size)
engine.setPriceTick(priceTick)
engine.setDatabase(dbName, symbol)
engine.initStrategy(strategyClass, setting)
engine.runBacktesting()
df = engine.calculateDailyResult()
df, d = engine.calculateDailyStatistics(df)
try:
targetValue = d[targetName]
except KeyError:
targetValue = 0
return (str(setting), targetValue, d)
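#----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The strategy class,
# its settings and the data collection below are placeholders: any CTA
# strategy class and any bar collection loaded into MongoDB for this engine
# would be wired up the same way.
if __name__ == '__main__':
    from myStrategyModule import MyCtaStrategy  # placeholder: your own CTA strategy class

    engine = BacktestingEngine()
    engine.setBacktestingMode(BacktestingEngine.BAR_MODE)  # replay bar data
    engine.setStartDate('20170101', initDays=10)           # the first 10 days only initialize the strategy
    engine.setSlippage(0.2)                                # assumed slippage per side
    engine.setRate(0.3/10000)                              # commission as a fraction of turnover
    engine.setSize(300)                                    # contract multiplier
    engine.setPriceTick(0.2)                               # minimum price increment
    engine.setDatabase(MINUTE_DB_NAME, 'IF0000')           # assumes MINUTE_DB_NAME from ctaBase and a prepared collection

    engine.initStrategy(MyCtaStrategy, {})                 # empty setting dict uses the strategy defaults
    engine.runBacktesting()
    engine.showDailyResult()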
| mit |
tmhm/scikit-learn | examples/cluster/plot_cluster_comparison.py | 246 | 4684 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters of
each clustering strategy, for instance by setting the number of
clusters for the methods that require it. Note that affinity
propagation has a tendency to create many clusters. Thus in this
example its two parameters (damping and per-point preference) were
set to mitigate this behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
bthirion/scikit-learn | sklearn/metrics/scorer.py | 33 | 17925 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.model_selection.GridSearchCV` or
:func:`sklearn.model_selection.cross_val_score` as the ``scoring``
parameter, to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, mean_squared_log_error, accuracy_score,
f1_score, roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from .cluster import homogeneity_score
from .cluster import completeness_score
from .cluster import v_measure_score
from .cluster import mutual_info_score
from .cluster import adjusted_mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
# XXX After removing the deprecated scorers (v0.20) remove the
# XXX deprecation_msg property again and remove __call__'s body again
self._deprecation_msg = None
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
if self._deprecation_msg is not None:
warnings.warn(self._deprecation_msg,
category=DeprecationWarning,
stacklevel=2)
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
super(_PredictScorer, self).__call__(estimator, X, y_true,
sample_weight=sample_weight)
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
super(_ProbaScorer, self).__call__(clf, X, y,
sample_weight=sample_weight)
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
super(_ThresholdScorer, self).__call__(clf, X, y,
sample_weight=sample_weight)
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
                    y_pred = np.vstack([p for p in y_pred]).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
scorers = [scorer for scorer in SCORERS
if SCORERS[scorer]._deprecation_msg is None]
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(scorers)))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should be an estimator implementing "
"'fit' method, %r was passed" % estimator)
if isinstance(scoring, six.string_types):
return get_scorer(scoring)
elif has_scoring:
# Heuristic to ensure user has not passed a metric
module = getattr(scoring, '__module__', None)
if hasattr(module, 'startswith') and \
module.startswith('sklearn.metrics.') and \
not module.startswith('sklearn.metrics.scorer') and \
not module.startswith('sklearn.metrics.tests.'):
raise ValueError('scoring value %r looks like it is a metric '
'function rather than a scorer. A scorer should '
'require an estimator as its first parameter. '
'Please use `make_scorer` to convert a metric '
'to a scorer.' % scoring)
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
    ``mean_squared_error``, ``adjusted_rand_score`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
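# Illustrative sketch (added commentary, not part of the original module):
# wrapping a *loss* with greater_is_better=False makes the resulting scorer
# return the negated loss, so "greater is better" still holds during model
# selection. The toy data and LinearRegression estimator are assumptions.
def _example_neg_mse_scorer():
    from sklearn.linear_model import LinearRegression

    X = np.array([[0.], [1.], [2.], [3.]])
    y = np.array([0., 1., 2., 3.])
    est = LinearRegression().fit(X, y)
    neg_mse = make_scorer(mean_squared_error, greater_is_better=False)
    # The returned value is -MSE, i.e. always <= 0.
    return neg_mse(est, X, y)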
# Standard regression scores
r2_scorer = make_scorer(r2_score)
neg_mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
deprecation_msg = ('Scoring method mean_squared_error was renamed to '
'neg_mean_squared_error in version 0.18 and will '
'be removed in 0.20.')
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_squared_error_scorer._deprecation_msg = deprecation_msg
neg_mean_squared_log_error_scorer = make_scorer(mean_squared_log_error,
greater_is_better=False)
neg_mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
deprecation_msg = ('Scoring method mean_absolute_error was renamed to '
'neg_mean_absolute_error in version 0.18 and will '
'be removed in 0.20.')
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
mean_absolute_error_scorer._deprecation_msg = deprecation_msg
neg_median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
deprecation_msg = ('Scoring method median_absolute_error was renamed to '
'neg_median_absolute_error in version 0.18 and will '
'be removed in 0.20.')
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
median_absolute_error_scorer._deprecation_msg = deprecation_msg
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
neg_log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
deprecation_msg = ('Scoring method log_loss was renamed to '
'neg_log_loss in version 0.18 and will be removed in 0.20.')
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
log_loss_scorer._deprecation_msg = deprecation_msg
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
homogeneity_scorer = make_scorer(homogeneity_score)
completeness_scorer = make_scorer(completeness_score)
v_measure_scorer = make_scorer(v_measure_score)
mutual_info_scorer = make_scorer(mutual_info_score)
adjusted_mutual_info_scorer = make_scorer(adjusted_mutual_info_score)
normalized_mutual_info_scorer = make_scorer(normalized_mutual_info_score)
fowlkes_mallows_scorer = make_scorer(fowlkes_mallows_score)
SCORERS = dict(r2=r2_scorer,
neg_median_absolute_error=neg_median_absolute_error_scorer,
neg_mean_absolute_error=neg_mean_absolute_error_scorer,
neg_mean_squared_error=neg_mean_squared_error_scorer,
neg_mean_squared_log_error=neg_mean_squared_log_error_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
neg_log_loss=neg_log_loss_scorer,
# Cluster metrics that use supervised evaluation
adjusted_rand_score=adjusted_rand_scorer,
homogeneity_score=homogeneity_scorer,
completeness_score=completeness_scorer,
v_measure_score=v_measure_scorer,
mutual_info_score=mutual_info_scorer,
adjusted_mutual_info_score=adjusted_mutual_info_scorer,
normalized_mutual_info_score=normalized_mutual_info_scorer,
fowlkes_mallows_score=fowlkes_mallows_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(metric, pos_label=None,
average=average)
| bsd-3-clause |
wzbozon/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed.
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
In the first benchmark, we fix the number of features and increase the
number of training samples. Then we plot the computation time as a
function of the number of samples.
In the second benchmark, we fix the number of samples and increase the
number of features (dimensions) of the training set. Then we plot the
computation time as a function of the number of features.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
imaculate/scikit-learn | sklearn/preprocessing/label.py | 28 | 26883 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
y = column_or_1d(y, warn=True)
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
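# Illustrative sketch (added commentary, not part of the original module):
# inverting continuous scores with an explicit threshold, as described in the
# inverse_transform docstring above. The toy scores are assumptions and stand
# in for the output of a decision_function.
def _example_labelbinarizer_threshold():
    lb = LabelBinarizer()
    lb.fit([0, 1, 1, 0])
    scores = np.array([[-1.2], [0.7], [2.5], [-0.3]])
    # threshold=0 is appropriate for decision_function-like scores.
    return lb.inverse_transform(scores, threshold=0)  # -> array([0, 1, 1, 0])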
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if n_classes == 1:
if sparse_output:
return sp.csr_matrix((n_samples, 1), dtype=int)
else:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
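# Illustrative sketch (added commentary, not part of the original module):
# for dense input the multiclass inverse simply takes the per-row argmax and
# maps it back onto the class labels. The score matrix below is an assumption.
def _example_inverse_binarize_multiclass():
    scores = np.array([[0.1, 0.8, 0.1],
                       [0.6, 0.2, 0.2]])
    return _inverse_binarize_multiclass(scores, classes=[2, 5, 7])  # -> [5, 2]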
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
return np.repeat(classes[0], len(y))
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
check_is_fitted(self, 'classes_')
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
check_is_fitted(self, 'classes_')
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
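# Illustrative sketch (added commentary, not part of the original module):
# a round trip through MultiLabelBinarizer, matching the fit_transform /
# inverse_transform docstrings above. The label sets are assumptions.
def _example_multilabelbinarizer_roundtrip():
    mlb = MultiLabelBinarizer()
    yt = mlb.fit_transform([('a', 'b'), ('b',)])
    # yt is [[1, 1], [0, 1]] with classes_ == ['a', 'b']
    return mlb.inverse_transform(yt)  # -> [('a', 'b'), ('b',)]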
| bsd-3-clause |
pompiduskus/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
mjgrav2001/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
alf3r/GidroGraf-Sirius | src/main_satelite.py | 1 | 2207 | import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as sc
# Load the image
filename = '../img/satelite.tif'
img = sc.imread(filename)
# img = img[0:8000, 0:8000]
# # Histogram correction
for i in range(1, 30):
alpha = 1 * i
a = cv2.convertScaleAbs(img, alpha=alpha, beta=0)
beta = 127 - np.median(a, [0, 1])
a = cv2.convertScaleAbs(img, alpha=alpha, beta=beta)
condition = np.mod(a, 255) == 0
K = np.sum(condition) / a.size
if K > 0.1:
break
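# Note on the loop above (added commentary): the gain alpha is increased until
# more than 10% of the pixels saturate at 0 or 255, while beta shifts the
# median of the scaled image to 127 before CLAHE is applied below.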
# a1 = np.median(a, 0)
# plt.hist(a1, 256, range=[0, 255], fc='k', ec='k')
# plt.show()
img = []
clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(30, 30))
img = clahe.apply(a)
#
#
# Blurring and binarization
a = []
a = cv2.blur(img, (20, 20))
# a = cv2.adaptiveThreshold(a, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 201, 1)
retval2, a = cv2.threshold(a, 80, 255, cv2.THRESH_BINARY)
# Contour detection
im2, contours, hierarchy = cv2.findContours(a, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
new_boxes = []
new_contours = []
for cnt in contours:
area = cv2.contourArea(cnt)
    if 1000 < area < 1000000:
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
# convert all coordinates floating point values to int
box = np.int0(box)
# draw a red 'nghien' rectangle
new_contours.append(cnt)
new_boxes.append(box)
area = float(area)/9698
area_m = "%.1f km2" % area
cv2.putText(img, area_m, (box[0][0] + 50, box[0][1] + 50), cv2.FONT_HERSHEY_COMPLEX, 4, (0, 0, 0), 8, cv2.LINE_AA)
# 5.35
# 51883
contours = []
cv2.drawContours(img, new_contours, -1, (0, 255, 0), 2)
cv2.drawContours(img, new_boxes, -1, (255, 0, 0), 10)
# Display
# figure, axes = plt.subplots(1, 2, sharey=True)
# axes[0].imshow(a, cmap='inferno', interpolation='bicubic', clim=(0, 255))
# axes[1].imshow(img, interpolation='bicubic', clim=(0, 255))
plt.imshow(img, interpolation='bicubic', clim=(0, 255))
plt.show()
| gpl-3.0 |
brentp/vcfanno | scripts/paper/chunk-gap-plot.py | 2 | 2020 | import sys
import re
import numpy as np
from collections import defaultdict
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_style("white")
sns.set_palette('Set1', 8)
colors = sns.color_palette('Set1', 3)
f, axes = plt.subplots(1, figsize=(4, 2))
axes = (axes,)
# run as python chunk-gap-plot.py 1kg.times-tails.fmt.txt exac.times-tails.txt
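# Added note: judging from the parsing below, each input line is assumed to be
# whitespace-separated as "<gap> <chunk> <procs> <text ... in <N> seconds ...>",
# and only 4-process runs with chunk sizes 1000/10000/100000 are plotted.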
for i, f in enumerate(sys.argv[1:3]):
if i == 0:
assert "1kg" in f.lower()
else:
assert "exac" in f.lower()
groups = defaultdict(list)
for line in open(f):
gap, chunk, procs, info = re.split("\s+", line, 3)
if not int(chunk) in (1000, 10000, 100000): continue
seconds = re.search("in (.+) seconds", info).groups(0)[0]
if gap == '100' or chunk == '100': continue
if int(procs) != 4: continue
groups[(int(gap), int(chunk))].append(float(seconds))
bychunk = defaultdict(list)
for gap, chunk in groups:
#if chunk != 5000: continue
m = np.mean(groups[(gap, chunk)])
bychunk[chunk].append((gap, m))
label = "ExAC" if i == 1 else "1KG"
marker = "o" if label == "ExAC" else "s"
for j, (chunk, vals) in enumerate(sorted(bychunk.items())):
vals.sort()
xs, ys = zip(*vals)
plabel = "%d : %s" % (chunk, label)
if i == 1:
plabel = label
axes[0].plot(xs, ys, color=colors[j], ls="--" if label == "ExAC" else
"-", label=plabel) #, marker=marker)
if i == 0:
axes[0].set_xlabel("Gap size")
axes[0].set_ylabel("Time (seconds)")
sns.despine()
plt.legend(ncol=2, markerfirst=False, title="Chunk size",
loc=(axes[0].get_position().x1-0.45, axes[0].get_position().y1 - 0.085))
ax = plt.gca()
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(7)
for item in ax.get_legend().get_texts():
item.set_fontsize(5)
plt.savefig('figure-5.pdf')
plt.show()
| mit |
untom/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
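# For reference (added commentary), the piecewise form implemented above, with
# z = y_true * y_pred:
#   L(z) = 0            if z >= 1
#   L(z) = (1 - z)**2   if -1 <= z < 1
#   L(z) = -4 * z       if z < -1
# i.e. quadratic near the margin and linear for badly misclassified points.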
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
RuanAragao/peach | tutorial/fuzzy-logic/norms-conorms.py | 6 | 3359 | ################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/norms-conorms.py
# How to use t-norms and s-norms (norms and conorms)
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
import numpy
from peach.fuzzy import *
from peach.fuzzy.norms import *
# The standard operations with sets -- and thus fuzzy sets -- are intersection,
# union and complement. Fuzzy sets, however, are an extension to classical sets,
# and there are infinite ways to extend those operations. Thus the existence of
# norms, conorms and negations. We show here how to use them in Peach.
# First, remember that we must create the sets. A FuzzySet instance is returned
# when you apply a membership function over a domain. It is, in fact, a
# standard array, but making it a new class allow us to redefine operations.
# Here we create the sets:
x = numpy.linspace(-5.0, 5.0, 500)
a = Triangle(-3.0, -1.0, 1.0)(x)
b = Triangle(-1.0, 1.0, 3.0)(x)
# To set norms, conorms and negations, we use, respectively, the methods
# set_norm, set_conorm and set_negation. Notice that those are class methods, so
# if you change the norm for one instance of a set, you change for them all! So,
# it is better to use the class name to select the methods. Here, we will use
# Zadeh norms, which are already defined in Peach. Notice that we use the
# standard operators for and, or and not operations (respectively, &, | and ~):
FuzzySet.set_norm(ZadehAnd)
FuzzySet.set_conorm(ZadehOr)
aandb_zadeh = a & b # A and B
aorb_zadeh = a | b # A or B
# Probabilistic norms are based on the corresponding operations in probability.
# Here we use them
FuzzySet.set_norm(ProbabilisticAnd)
FuzzySet.set_conorm(ProbabilisticOr)
aandb_prob = a & b
aorb_prob = a | b
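# For reference (added commentary, not part of the original tutorial), the
# formulas behind these operators for membership values a(x) and b(x) are:
#   Zadeh:         a AND b = min(a, b)    a OR b = max(a, b)
#   Probabilistic: a AND b = a * b        a OR b = a + b - a * b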
# There are other norms that we could use. Please, check the documentation for
# a complete list. Here are some of them:
# Norms: ZadehAnd, ProbabilisticAnd, DrasticProduct, EinsteinProduct
# Conorms: ZadehOr, ProbabilisticOr, DrasticSum, EinsteinSum
# We will use the matplotlib module to plot these functions. We save the plot in
# a figure called 'norms-conorms.png'.
try:
from matplotlib import *
from matplotlib.pylab import *
figure(1).set_size_inches(8, 6)
a1 = axes([ 0.125, 0.555, 0.775, 0.40 ])
a2 = axes([ 0.125, 0.125, 0.775, 0.40 ])
a1.hold(True)
a1.plot(x, a, 'k:')
a1.plot(x, b, 'k:')
a1.plot(x, aandb_zadeh, 'k')
a1.plot(x, aandb_prob, 'k-.')
a1.set_xlim([ -5, 5 ])
a1.set_ylim([ -0.1, 1.1 ])
a1.set_xticks([])
a1.set_yticks([ 0.0, 1.0 ])
a1.legend((r'$A$', r'$B$', 'Zadeh AND', 'Prob. AND'))
a2.hold(True)
a2.plot(x, a, 'k:')
a2.plot(x, b, 'k:')
a2.plot(x, aorb_zadeh, 'k')
a2.plot(x, aorb_prob, 'k-.')
a2.set_xlim([ -5, 5 ])
a2.set_ylim([ -0.1, 1.1 ])
a2.set_xticks([])
a2.set_yticks([ 0.0, 1.0 ])
a2.legend((r'$A$', r'$B$', 'Zadeh OR', 'Prob. OR'))
savefig("norms-conorms.png")
except ImportError:
pass
| lgpl-2.1 |
tomlof/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 31 | 13747 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed dataset size is around 14 MB. Once
uncompressed, the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from .base import _pkl_filepath
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
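# Illustrative usage sketch (added commentary, not part of the original
# module) for the loader described in the module docstring. The category
# names are assumptions taken from the standard 20 newsgroups label set, and
# the first call downloads and caches the data under data_home.
def _example_fetch_20newsgroups():
    bunch = fetch_20newsgroups(subset='train',
                               categories=['rec.autos', 'sci.space'],
                               remove=('headers', 'footers', 'quotes'))
    # bunch.data holds the raw posts, bunch.target the integer labels.
    return len(bunch.data), bunch.target_names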
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
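# Illustrative sketch (added commentary, not part of the original module):
# how the three strip_* helpers above behave on a tiny, made-up post.
def _example_strip_newsgroup_parts():
    post = ("From: someone\nSubject: hello\n\n"
            "a user writes:\n> quoted line\nactual body\n\n--\nsignature")
    body = strip_newsgroup_header(post)   # drop everything before the blank line
    body = strip_newsgroup_quoting(body)  # drop the 'writes:' and '>' lines
    body = strip_newsgroup_footer(body)   # drop the trailing signature block
    return body  # -> "actual body\n"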
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset : 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home : optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories : None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle : bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove : tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
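
    Examples
    --------
    An illustrative sketch (the first call downloads the archive, so the
    lines are marked to be skipped by doctest):

    >>> from sklearn.datasets import fetch_20newsgroups
    >>> newsgroups = fetch_20newsgroups(subset='train', categories=['sci.space'])  # doctest: +SKIP
    >>> newsgroups.target_names  # doctest: +SKIP
    ['sci.space']
    >>> len(newsgroups.data) == newsgroups.target.shape[0]  # doctest: +SKIP
    True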
"""
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
logger.info("Downloading 20news dataset. "
"This may take a few minutes.")
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset : 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home : optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove : tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
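
    Examples
    --------
    An illustrative sketch (downloads and vectorizes the data on first use,
    so the lines are marked to be skipped by doctest):

    >>> from sklearn.datasets import fetch_20newsgroups_vectorized
    >>> bunch = fetch_20newsgroups_vectorized(subset='train')  # doctest: +SKIP
    >>> bunch.data.shape[0] == bunch.target.shape[0]  # doctest: +SKIP
    True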
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = _pkl_filepath(data_home, filebase + ".pkl")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
intel-analytics/analytics-zoo | pyzoo/test/zoo/chronos/model/forecast/test_tcmf_forecaster.py | 1 | 14341 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
from zoo.chronos.model.forecast.tcmf_forecaster import TCMFForecaster
from unittest import TestCase
import tempfile
import pandas as pd
class TestChronosModelTCMFForecaster(TestCase):
def setUp(self):
self.model = TCMFForecaster()
self.num_samples = 300
self.horizon = np.random.randint(1, 50)
self.seq_len = 480
self.data = np.random.rand(self.num_samples, self.seq_len)
self.id = np.arange(self.num_samples)
self.data_new = np.random.rand(self.num_samples, self.horizon)
self.fit_params = dict(val_len=12,
start_date="2020-1-1",
freq="5min",
y_iters=1,
init_FX_epoch=1,
max_FX_epoch=1,
max_TCN_epoch=1,
alt_iters=2)
def test_forecast_tcmf_ndarray(self):
ndarray_input = {'id': self.id, 'y': self.data}
self.model.fit(ndarray_input, **self.fit_params)
assert not self.model.is_xshards_distributed()
# test predict
yhat = self.model.predict(horizon=self.horizon)
# test save load
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
yhat_loaded = loaded_model.predict(horizon=self.horizon)
yhat_id = yhat_loaded["id"]
np.testing.assert_equal(yhat_id, self.id)
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (self.num_samples, self.horizon)
np.testing.assert_array_almost_equal(yhat, yhat_loaded, decimal=4)
# test evaluate
target_value = dict({"y": self.data_new})
assert self.model.evaluate(target_value=target_value, metric=['mse'])
# test fit_incremental
self.model.fit_incremental({'y': self.data_new}) # 1st time
self.model.fit_incremental({'y': self.data_new}) # 2nd time
yhat_incr = self.model.predict(horizon=self.horizon)
yhat_incr = yhat_incr["prediction"]
assert yhat_incr.shape == (self.num_samples, self.horizon)
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
def test_tcmf_ndarray_covariates_dti(self):
ndarray_input = {'id': self.id, 'y': self.data}
self.model.fit(ndarray_input,
covariates=np.random.rand(3, self.seq_len),
dti=pd.date_range('20130101', periods=self.seq_len),
**self.fit_params)
future_covariates = np.random.randn(3, self.horizon)
future_dti = pd.date_range('20130101', periods=self.horizon)
# test predict
yhat = self.model.predict(horizon=self.horizon,
future_covariates=future_covariates,
future_dti=future_dti,
)
# test save load
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
yhat_loaded = loaded_model.predict(horizon=self.horizon,
future_covariates=future_covariates,
future_dti=future_dti,
)
yhat_id = yhat_loaded["id"]
np.testing.assert_equal(yhat_id, self.id)
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (self.num_samples, self.horizon)
np.testing.assert_array_almost_equal(yhat, yhat_loaded, decimal=4)
# test evaluate
target_value = dict({"y": self.data_new})
assert self.model.evaluate(target_value=target_value,
target_covariates=future_covariates,
target_dti=future_dti,
metric=['mse'])
# test fit_incremental
self.model.fit_incremental({'y': self.data_new},
covariates_incr=future_covariates,
dti_incr=future_dti,)
yhat_incr = self.model.predict(horizon=self.horizon,
future_covariates=future_covariates,
future_dti=future_dti,
)
yhat_incr = yhat_incr["prediction"]
assert yhat_incr.shape == (self.num_samples, self.horizon)
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
def test_forecast_ndarray_error(self):
# is_xshards_distributed
with self.assertRaises(Exception) as context:
self.model.is_xshards_distributed()
self.assertTrue('You should run fit before calling is_xshards_distributed()'
in str(context.exception))
# fit
input = dict({'data': self.data})
with self.assertRaises(Exception) as context:
self.model.fit(input)
self.assertTrue("key `y` doesn't exist in x" in str(context.exception))
input = dict({'y': "abc"})
with self.assertRaises(Exception) as context:
self.model.fit(input)
self.assertTrue("the value of y should be an ndarray" in str(context.exception))
id_diff = np.arange(200)
input = dict({'id': id_diff, 'y': self.data})
with self.assertRaises(Exception) as context:
self.model.fit(input)
self.assertTrue("the length of the id array should be equal to the number of"
in str(context.exception))
input_right = dict({'id': self.id, 'y': self.data})
self.model.fit(input_right, **self.fit_params)
with self.assertRaises(Exception) as context:
self.model.fit(input_right)
self.assertTrue('This model has already been fully trained' in str(context.exception))
# fit_incremental
data_id_diff = {'id': self.id - 1, 'y': self.data_new}
with self.assertRaises(ValueError) as context:
self.model.fit_incremental(data_id_diff)
self.assertTrue('The input ids in fit_incremental differs from input ids in fit'
in str(context.exception))
# evaluate
target_value_fake = dict({"data": self.data_new})
with self.assertRaises(Exception) as context:
self.model.evaluate(target_value=target_value_fake, metric=['mse'])
self.assertTrue("key `y` doesn't exist in x" in str(context.exception))
def test_forecast_tcmf_without_id(self):
# construct data
input = dict({'y': self.data})
self.model.fit(input, **self.fit_params)
assert not self.model.is_xshards_distributed()
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
yhat = self.model.predict(horizon=self.horizon)
yhat_loaded = loaded_model.predict(horizon=self.horizon)
assert "id" not in yhat_loaded
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (self.num_samples, self.horizon)
np.testing.assert_array_almost_equal(yhat, yhat_loaded, decimal=4)
target_value = dict({"y": self.data_new})
self.model.evaluate(target_value=target_value, metric=['mse'])
self.model.fit_incremental({'y': self.data_new}) # 1st time
yhat_incr = self.model.predict(horizon=self.horizon)
yhat_incr = yhat_incr["prediction"]
assert yhat_incr.shape == (self.num_samples, self.horizon)
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
data_new_id = {'id': self.id, 'y': self.data_new}
with self.assertRaises(ValueError) as context:
self.model.fit_incremental(data_new_id)
self.assertTrue('Got valid id in fit_incremental and invalid id in fit.'
in str(context.exception))
def test_forecast_tcmf_xshards(self):
from zoo.orca import OrcaContext
import zoo.orca.data.pandas
import pandas as pd
OrcaContext.pandas_read_backend = "pandas"
def preprocessing(df, id_name, y_name):
id = df.index
data = df.to_numpy()
result = dict({id_name: id, y_name: data})
return result
def postprocessing(pred_results, output_dt_col_name):
id_arr = pred_results["id"]
pred_results = pred_results["prediction"]
pred_results = np.concatenate((np.expand_dims(id_arr, axis=1), pred_results), axis=1)
final_df = pd.DataFrame(pred_results, columns=["id"] + output_dt_col_name)
final_df.id = final_df.id.astype("int")
final_df = final_df.set_index("id")
final_df.columns.name = "datetime"
final_df = final_df.unstack().reset_index().rename({0: "prediction"}, axis=1)
return final_df
def get_pred(d):
return d["prediction"]
with tempfile.NamedTemporaryFile() as temp:
data = np.random.rand(300, 480)
df = pd.DataFrame(data)
df.to_csv(temp.name)
shard = zoo.orca.data.pandas.read_csv(temp.name)
shard.cache()
shard_train = shard.transform_shard(preprocessing, 'id', 'data')
with self.assertRaises(Exception) as context:
self.model.fit(shard_train)
self.assertTrue("key `y` doesn't exist in x" in str(context.exception))
shard_train = shard.transform_shard(preprocessing, 'cid', 'y')
with self.assertRaises(Exception) as context:
self.model.fit(shard_train)
self.assertTrue("key `id` doesn't exist in x" in str(context.exception))
with self.assertRaises(Exception) as context:
self.model.is_xshards_distributed()
self.assertTrue('You should run fit before calling is_xshards_distributed()'
in str(context.exception))
shard_train = shard.transform_shard(preprocessing, 'id', 'y')
self.model.fit(shard_train, **self.fit_params)
assert self.model.is_xshards_distributed()
with self.assertRaises(Exception) as context:
self.model.fit(shard_train)
self.assertTrue('This model has already been fully trained' in str(context.exception))
with self.assertRaises(Exception) as context:
self.model.fit_incremental(shard_train)
self.assertTrue('NotImplementedError' in context.exception.__class__.__name__)
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname + "/model")
loaded_model = TCMFForecaster.load(tempdirname + "/model", is_xshards_distributed=True)
horizon = np.random.randint(1, 50)
yhat_shard_origin = self.model.predict(horizon=horizon)
yhat_list_origin = yhat_shard_origin.collect()
yhat_list_origin = list(map(get_pred, yhat_list_origin))
yhat_shard = loaded_model.predict(horizon=horizon)
yhat_list = yhat_shard.collect()
yhat_list = list(map(get_pred, yhat_list))
yhat_origin = np.concatenate(yhat_list_origin)
yhat = np.concatenate(yhat_list)
assert yhat.shape == (300, horizon)
np.testing.assert_equal(yhat, yhat_origin)
output_dt_col_name = pd.date_range(start='2020-05-01', periods=horizon, freq='H').to_list()
yhat_df_shards = yhat_shard.transform_shard(postprocessing, output_dt_col_name)
final_df_list = yhat_df_shards.collect()
final_df = pd.concat(final_df_list)
final_df.sort_values("datetime", inplace=True)
assert final_df.shape == (300 * horizon, 3)
OrcaContext.pandas_read_backend = "spark"
def test_forecast_tcmf_distributed(self):
input = dict({'id': self.id, 'y': self.data})
from zoo.orca import init_orca_context, stop_orca_context
init_orca_context(cores=4, spark_log_level="INFO", init_ray_on_spark=True,
object_store_memory="1g")
self.model.fit(input, num_workers=4, **self.fit_params)
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
yhat = self.model.predict(horizon=self.horizon, num_workers=4)
yhat_loaded = loaded_model.predict(horizon=self.horizon, num_workers=4)
yhat_id = yhat_loaded["id"]
np.testing.assert_equal(yhat_id, self.id)
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (self.num_samples, self.horizon)
np.testing.assert_equal(yhat, yhat_loaded)
self.model.fit_incremental({'y': self.data_new})
yhat_incr = self.model.predict(horizon=self.horizon)
yhat_incr = yhat_incr["prediction"]
assert yhat_incr.shape == (self.num_samples, self.horizon)
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
target_value = dict({"y": self.data_new})
assert self.model.evaluate(target_value=target_value, metric=['mse'])
stop_orca_context()
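# Illustrative helper (a sketch mirroring the layout the tests above pass in):
# TCMFForecaster.fit takes a dict with an optional integer 'id' array and a
# 2-D 'y' array of shape (n_series, seq_len); predict returns a dict whose
# 'prediction' entry has shape (n_series, horizon).
def _example_tcmf_input(n_series=4, seq_len=48):
    """Build a minimal input dict in the layout used by these tests."""
    return {'id': np.arange(n_series), 'y': np.random.rand(n_series, seq_len)}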
if __name__ == "__main__":
pytest.main([__file__])
| apache-2.0 |
mjudsp/Tsallis | sklearn/gaussian_process/tests/test_gpr.py | 23 | 11915 | """Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal)
def f(x):
return x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2)),
C(0.1, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2))]
def test_gpr_interpolation():
"""Test the interpolating property for different kernels."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_true(np.allclose(y_pred, y))
assert_true(np.allclose(np.diag(y_cov), 0.))
def test_lml_improving():
""" Test that hyperparameter-tuning improves log-marginal likelihood. """
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
""" Test that lml of optimized kernel is stored correctly. """
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_equal(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood())
def test_converged_to_local_maximum():
""" Test that we are in local maximum after hyperparameter-optimization."""
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])))
def test_solution_inside_bounds():
""" Test that hyperparameter-optimization remains in bounds"""
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
bounds = gpr.kernel_.bounds
max_ = np.finfo(gpr.kernel_.theta.dtype).max
tiny = 1e-10
bounds[~np.isfinite(bounds[:, 1]), 1] = max_
assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
def test_lml_gradient():
""" Compare analytic and numeric gradient of log marginal likelihood. """
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpr.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_prior():
""" Test that GP prior has mean 0 and identical variances."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel)
y_mean, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_mean, 0, 5)
if len(gpr.kernel.theta) > 1:
# XXX: quite hacky, works only for current kernels
assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
else:
assert_almost_equal(np.diag(y_cov), 1, 5)
def test_sample_statistics():
""" Test that statistics of samples drawn from GP are correct."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
samples = gpr.sample_y(X2, 300000)
        # More digits of accuracy would require many more samples
assert_almost_equal(y_mean, np.mean(samples, 1), 1)
assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
np.var(samples, 1) / np.diag(y_cov).max(), 1)
def test_no_optimizer():
""" Test that kernel parameters are unmodified when optimizer is None."""
kernel = RBF(1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
assert_equal(np.exp(gpr.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
""" Test that predicted std.-dev. is consistent with cov's diagonal."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
y_mean, y_std = gpr.predict(X2, return_std=True)
assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
def test_anisotropic_kernel():
""" Test that GPR can identify meaningful anisotropic length-scales. """
# We learn a function which varies in one dimension ten-times slower
# than in the other. The corresponding length-scales should differ by at
# least a factor 5
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (50, 2))
y = X[:, 0] + 0.1 * X[:, 1]
kernel = RBF([1.0, 1.0])
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(np.exp(gpr.kernel_.theta[1]),
np.exp(gpr.kernel_.theta[0]) * 5)
def test_random_starts():
"""
Test that an increasing number of random-starts of GP fitting only
increases the log marginal likelihood of the chosen theta.
"""
n_samples, n_features = 25, 2
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
+ rng.normal(scale=0.1, size=n_samples)
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1.0] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessRegressor(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0,).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_y_normalization():
""" Test normalization of the target values in GP
Fitting non-normalizing GP on normalized y and fitting normalizing GP
on unnormalized y should yield identical results
"""
y_mean = y.mean(0)
y_norm = y - y_mean
for kernel in kernels:
# Fit non-normalizing GP on normalized y
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X, y_norm)
# Fit normalizing GP on unnormalized y
gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_norm.fit(X, y)
# Compare predicted mean, std-devs and covariances
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
y_pred = y_mean + y_pred
y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
assert_almost_equal(y_pred, y_pred_norm)
assert_almost_equal(y_pred_std, y_pred_std_norm)
_, y_cov = gpr.predict(X2, return_cov=True)
_, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
assert_almost_equal(y_cov, y_cov_norm)
def test_y_multioutput():
""" Test that GPR can deal with multi-dimensional target values"""
y_2d = np.vstack((y, y * 2)).T
# Test for fixed kernel that first dimension of 2d GP equals the output
# of 1d GP and that second dimension is twice as large
kernel = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr_2d.fit(X, y_2d)
y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
_, y_cov_1d = gpr.predict(X2, return_cov=True)
_, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
# Standard deviation and covariance do not depend on output
assert_almost_equal(y_std_1d, y_std_2d)
assert_almost_equal(y_cov_1d, y_cov_2d)
y_sample_1d = gpr.sample_y(X2, n_samples=10)
y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
# Test hyperparameter optimization
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_2d.fit(X, np.vstack((y, y)).T)
assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
def test_custom_optimizer():
""" Test that GPR can use externally defined optimizers. """
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
gpr.fit(X, y)
# Checks that optimizer improved marginal likelihood
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_duplicate_input():
""" Test GPR can handle two different output-values for the same input. """
for kernel in kernels:
gpr_equal_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
gpr_similar_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
X_ = np.vstack((X, X[0]))
y_ = np.hstack((y, y[0] + 1))
gpr_equal_inputs.fit(X_, y_)
X_ = np.vstack((X, X[0] + 1e-15))
y_ = np.hstack((y, y[0] + 1))
gpr_similar_inputs.fit(X_, y_)
X_test = np.linspace(0, 10, 100)[:, None]
y_pred_equal, y_std_equal = \
gpr_equal_inputs.predict(X_test, return_std=True)
y_pred_similar, y_std_similar = \
gpr_similar_inputs.predict(X_test, return_std=True)
assert_almost_equal(y_pred_equal, y_pred_similar)
assert_almost_equal(y_std_equal, y_std_similar)
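# A minimal illustrative sketch of the fit/predict cycle these tests exercise;
# it reuses the module-level toy data defined above and only runs when the
# file is executed directly.
if __name__ == '__main__':
    gpr = GaussianProcessRegressor(kernel=RBF(length_scale=1.0)).fit(X, y)
    y_mean, y_std = gpr.predict(X2, return_std=True)
    assert y_mean.shape == (X2.shape[0],)
    assert y_std.shape == (X2.shape[0],)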
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | education_examples/_Chapter_Modeling_and_Simulation_Examples_Static_Examples/Contact_Normal_Interface_Behaviour_HardContact_Nonlinear_Hardening_Softening_Shear_Model/plot.py | 8 | 1187 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import sys
import numpy as np;
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
normal_strain = finput["/Model/Elements/Element_Outputs"][6,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure()
plt.plot(normal_strain, normal_stress, '-k', linewidth=4)
plt.xlabel(r"Normal Strain $\epsilon$")
plt.ylabel(r"Normal Stress $\sigma$")
plt.savefig("Contact_Normal_Interface_Behavour.pdf", bbox_inches='tight')
plt.show()
# ##################################################################### | cc0-1.0 |
marqh/cartopy | lib/cartopy/gshhs.py | 1 | 6450 | # (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
# PLEASE NOTE: DUE TO SOME MPL RELATED ISSUES, THE GSHHS SUPPORT HAS BEEN DISABLED.
# IT IS ANTICIPATED THAT BY 0.5 THERE WILL BE A CLEAN AND TIDY INTERFACE
# TO USE THIS USEFUL DATASET. - pelson
import matplotlib.patches as mpatches
import matplotlib.path as mpath
from matplotlib.collections import PatchCollection
import matplotlib.cm
import numpy
import os
from shapely.geometry import Polygon
# XXX Make the data dir configurable
project_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
data_dir = os.path.join(project_dir, 'data')
gshhs_data_dir = os.path.join(data_dir, 'gshhs')
fnames = {
'coarse': os.path.join(gshhs_data_dir, 'gshhs_c.b'),
'low': os.path.join(gshhs_data_dir, 'gshhs_l.b'),
'intermediate': os.path.join(gshhs_data_dir, 'gshhs_i.b'),
'high': os.path.join(gshhs_data_dir, 'gshhs_h.b'),
'full': os.path.join(gshhs_data_dir, 'gshhs_f.b'),
}
def read_gshhc(filename, poly=True, domain=None, filter_predicate=None):
"""
Reads:
Global Self-consistent Hierarchical High-resolution Shorelines
version 2.0 July 15, 2009
.. seealso:: http://www.soest.hawaii.edu/pwessel/gshhs/README.TXT
XXX: Return internal polygons when appropriate
"""
DEBUG = False
fh = open(filename, 'rb')
#(0, 360, -90, 90)
if domain is None:
domain = Polygon([[0, -90], [360, -90], [360, 90], [0, 90], [0, -90]])
extent_w, extent_s, extent_e, extent_n = [v * 1e6 for v in domain.bounds]
# corners = [extent_w, extent_n], [extent_w, extent_s], [extent_e, extent_s], [extent_e, extent_n]
#
# poly_extent = Polygon(numpy.array(corners) / 1e6)
poly_extent = domain
i=-1
# XXX
while True:
i += 1
# for i in xrange(10000):
# if i % 10000 == 1: print i
header = numpy.fromfile(fh, dtype='>i4', count=11)
# If no header was received, we are at the end of the file
if len(header) == 0:
break
if DEBUG:
if i not in ([
# 0, # Europe & Asia
# 1, # Africa
# 2, # USA
# 3, # S.America
# 4, # Antarctic
14, # UK
# 25, # Ireland
]):
continue
flag = header[2]
crosses_greenwich = (flag >> 16) & 1
flag = header[2]
level = flag & 255
# ###########################
# Filter the shapes by extent
# ###########################
# get the maximum extent in microdegrees
w, e, south, north = header[3:7]
in_x_range = False
in_y_range = False
# handle the case where the uk has an extent of -6230861 1765806 and Ireland has an extent of 349515833 354569167
# XXX I'm sure this could be done more cleanly
for off in range(2):
west = w - 360 * 1e6 * off
east = e - 360 * 1e6 * off
in_x_range = in_x_range or (extent_w <= west <= extent_e or extent_w <= east <= extent_e or (east >= extent_e and west <= extent_w))
in_y_range = in_y_range or (extent_s <= south <= extent_n or extent_s <= north <= extent_n or (north >= extent_n and south <= extent_s))
if not (in_x_range and in_y_range):
if DEBUG: print in_x_range, in_y_range, w, e, south, north, extent_w, extent_e
fh.seek(header[1]*2 * 4, 1)
continue
else:
if DEBUG: print in_x_range, in_y_range, w, e, south, north, extent_w, extent_e
points = numpy.fromfile(fh, dtype='>i4', count=header[1]*2) * 1.0e-6
points = points.reshape(-1, 2)
intersects = False
for off in range(2):
## west = points - numpy.array([[360 * off, 0]])
# east = points - numpy.array([[360 * off, 0]])
poly_shape = Polygon(points - numpy.array([[360 * off, 0]]))
# print (points - numpy.array([[360 * off, 0]]))[:10, ...]
# print corners
# print 'intersect? ', i, off*360, poly_shape.intersects(poly_extent)
intersects = intersects or poly_shape.intersects(poly_extent)
if not intersects:
continue
lons, lats = points[:, 0:1], points[:, 1:2]
if poly:
if ( level == 1 and
points.shape[0] > 4
):
yield points
else:
yield points
# break
# yield header, lons, lats
# if points.shape[0] > 4:
# yield header, lons, lats
# yield points
# if crosses_greenwich:
# # If the greenwich has been crossed, then 360 is added to any number below 0 in this format.
# # To fix this, identify any points which are more than 180 degrees apart, using this information we can identify
# # polygon groups and shift them appropriately.
# delta = numpy.diff(lons)
# step = numpy.where(numpy.abs(delta) > 180)[0]
# step = [0] + list(step+1) + [None]
# for s1, s2 in zip(step[:-1] , step[1:]):
# if delta[s1-1] > 180:
# lons[s1:s2] -= 360
#
# if i == 4:
# # antarctic
# lons = numpy.array(list(lons) + [lons[-1], lons[0], lons[0]])
# lats = numpy.array(list(lats) + [-90, -90, lats[0]])
# yield header, lons, lats
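# A small illustrative check of the 'flag' decoding used in read_gshhc above:
# the low byte of the header flag holds the polygon level and bit 16 marks
# polygons that cross the Greenwich meridian.
if __name__ == '__main__':
    example_flag = (1 << 16) | 1   # greenwich bit set, level 1 (land)
    assert example_flag & 255 == 1
    assert (example_flag >> 16) & 1 == 1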
| gpl-3.0 |
amolkahat/pandas | pandas/core/computation/ops.py | 7 | 15907 | """Operator classes for eval.
"""
import operator as op
from functools import partial
from datetime import datetime
import numpy as np
from pandas.core.dtypes.common import is_list_like, is_scalar
import pandas as pd
from pandas.compat import PY3, string_types, text_type
import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
from pandas.core.base import StringMixin
from pandas.core.computation.common import _ensure_decoded, _result_type_many
from pandas.core.computation.scope import _DEFAULT_GLOBALS
_reductions = 'sum', 'prod'
_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p',
'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos',
'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs')
_binary_math_ops = ('arctan2',)
_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = '__pd_eval_local_'
class UndefinedVariableError(NameError):
"""NameError subclass for local variables."""
def __init__(self, name, is_local):
if is_local:
msg = 'local variable {0!r} is not defined'
else:
msg = 'name {0!r} is not defined'
super(UndefinedVariableError, self).__init__(msg.format(name))
class Term(StringMixin):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = super(Term, klass).__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
self._name = name
self.env = env
self.side = side
tname = text_type(name)
self.is_local = (tname.startswith(_LOCAL_TAG) or
tname in _DEFAULT_GLOBALS)
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self):
return self.name.replace(_LOCAL_TAG, '')
def __unicode__(self):
return pprint_thing(self.name)
def __call__(self, *args, **kwargs):
return self.value
def evaluate(self, *args, **kwargs):
return self
def _resolve_name(self):
res = self.env.resolve(self.local_name, is_local=self.is_local)
self.update(res)
if hasattr(res, 'ndim') and res.ndim > 2:
raise NotImplementedError("N-dimensional objects, where N > 2,"
" are not supported with eval")
return res
def update(self, value):
"""
search order for local (i.e., @variable) variables:
scope, key_variable
[('locals', 'local_name'),
('globals', 'local_name'),
('locals', 'key'),
('globals', 'key')]
"""
key = self.name
# if it's a variable name (otherwise a constant)
if isinstance(key, string_types):
self.env.swapkey(self.local_name, key, new_value=value)
self.value = value
@property
def is_scalar(self):
return is_scalar(self._value)
@property
def type(self):
try:
# potentially very slow for large, mixed dtype frames
return self._value.values.dtype
except AttributeError:
try:
# ndarray
return self._value.dtype
except AttributeError:
# scalar
return type(self._value)
return_type = type
@property
def raw(self):
return pprint_thing('{0}(name={1!r}, type={2})'
''.format(self.__class__.__name__, self.name,
self.type))
@property
def is_datetime(self):
try:
t = self.type.type
except AttributeError:
t = self.type
return issubclass(t, (datetime, np.datetime64))
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def ndim(self):
return self._value.ndim
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
@property
def name(self):
return self.value
def __unicode__(self):
        # in python 2, str() of a float can be truncated to fewer
        # digits than repr(), so use repr() here
return repr(self.name)
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
class Op(StringMixin):
"""Hold an operator of arbitrary arity
"""
def __init__(self, op, operands, *args, **kwargs):
self.op = _bool_op_map.get(op, op)
self.operands = operands
self.encoding = kwargs.get('encoding', None)
def __iter__(self):
return iter(self.operands)
def __unicode__(self):
"""Print a generic n-ary operator and its operands using infix
notation"""
# recurse over the operands
parened = ('({0})'.format(pprint_thing(opr))
for opr in self.operands)
return pprint_thing(' {0} '.format(self.op).join(parened))
@property
def return_type(self):
# clobber types to bool if the op is a boolean operator
if self.op in (_cmp_ops_syms + _bool_ops_syms):
return np.bool_
return _result_type_many(*(term.type for term in com.flatten(self)))
@property
def has_invalid_return_type(self):
types = self.operand_types
obj_dtype_set = frozenset([np.dtype('object')])
return self.return_type == object and types - obj_dtype_set
@property
def operand_types(self):
return frozenset(term.type for term in com.flatten(self))
@property
def is_scalar(self):
return all(operand.is_scalar for operand in self.operands)
@property
def is_datetime(self):
try:
t = self.return_type.type
except AttributeError:
t = self.return_type
return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
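
    Examples
    --------
    Illustrative behaviour for a plain Python scalar and a pandas Series:

    >>> import pandas as pd
    >>> _in(2, [1, 2, 3])
    True
    >>> _in(pd.Series([1, 4]), [1, 2, 3]).tolist()
    [True, False]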
"""
try:
return x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in'
_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
_bool_ops_syms = '&', '|', 'and', 'or'
_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%'
_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div,
op.pow, op.floordiv, op.mod)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
_special_case_arith_ops_syms = '**', '//', '%'
_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod
_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms,
_special_case_arith_ops_funcs))
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
_binary_ops_dict.update(d)
def _cast_inplace(terms, acceptable_dtypes, dtype):
"""Cast an expression inplace.
Parameters
----------
terms : Op
The expression that should cast.
acceptable_dtypes : list of acceptable numpy.dtype
Will not cast if term's dtype in this list.
.. versionadded:: 0.19.0
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
if term.type in acceptable_dtypes:
continue
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
def is_term(obj):
return isinstance(obj, Term)
class BinOp(Op):
"""Hold a binary operator and its operands
Parameters
----------
op : str
left : Term or Op
right : Term or Op
"""
def __init__(self, op, lhs, rhs, **kwargs):
super(BinOp, self).__init__(op, (lhs, rhs))
self.lhs = lhs
self.rhs = rhs
self._disallow_scalar_only_bool_ops()
self.convert_values()
try:
self.func = _binary_ops_dict[op]
except KeyError:
# has to be made a list for python3
keys = list(_binary_ops_dict.keys())
raise ValueError('Invalid binary operator {0!r}, valid'
' operators are {1}'.format(op, keys))
def __call__(self, env):
"""Recursively evaluate an expression in Python space.
Parameters
----------
env : Scope
Returns
-------
object
The result of an evaluated expression.
"""
# handle truediv
if self.op == '/' and env.scope['truediv']:
self.func = op.truediv
# recurse over the left/right nodes
left = self.lhs(env)
right = self.rhs(env)
return self.func(left, right)
def evaluate(self, env, engine, parser, term_type, eval_in_python):
"""Evaluate a binary operation *before* being passed to the engine.
Parameters
----------
env : Scope
engine : str
parser : str
term_type : type
eval_in_python : list
Returns
-------
term_type
The "pre-evaluated" expression as an instance of ``term_type``
"""
if engine == 'python':
res = self(env)
else:
# recurse over the left/right nodes
left = self.lhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
right = self.rhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
# base cases
if self.op in eval_in_python:
res = self.func(left.value, right.value)
else:
res = pd.eval(self, local_dict=env, engine=engine,
parser=parser)
name = env.add_tmp(res)
return term_type(name, env=env)
def convert_values(self):
"""Convert datetimes to a comparable value in an expression.
"""
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
lhs, rhs = self.lhs, self.rhs
if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar:
v = rhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.rhs.update(v)
if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar:
v = lhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.lhs.update(v)
def _disallow_scalar_only_bool_ops(self):
if ((self.lhs.is_scalar or self.rhs.is_scalar) and
self.op in _bool_ops_dict and
(not (issubclass(self.rhs.return_type, (bool, np.bool_)) and
issubclass(self.lhs.return_type, (bool, np.bool_))))):
raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype):
return issubclass(np.dtype(dtype).type, np.number)
class Div(BinOp):
"""Div operator to special case casting.
Parameters
----------
lhs, rhs : Term or Op
The Terms or Ops in the ``/`` expression.
truediv : bool
Whether or not to use true division. With Python 3 this happens
regardless of the value of ``truediv``.
"""
def __init__(self, lhs, rhs, truediv, *args, **kwargs):
super(Div, self).__init__('/', lhs, rhs, *args, **kwargs)
if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
raise TypeError("unsupported operand type(s) for {0}:"
" '{1}' and '{2}'".format(self.op,
lhs.return_type,
rhs.return_type))
if truediv or PY3:
            # do not upcast float32s to float64 unnecessarily
acceptable_dtypes = [np.float32, np.float_]
_cast_inplace(com.flatten(self), acceptable_dtypes, np.float_)
_unary_ops_syms = '+', '-', '~', 'not'
_unary_ops_funcs = op.pos, op.neg, op.invert, op.invert
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
"""Hold a unary operator and its operands
Parameters
----------
op : str
The token used to represent the operator.
operand : Term or Op
The Term or Op operand to the operator.
Raises
------
ValueError
* If no function associated with the passed operator token is found.
"""
def __init__(self, op, operand):
super(UnaryOp, self).__init__(op, (operand,))
self.operand = operand
try:
self.func = _unary_ops_dict[op]
except KeyError:
raise ValueError('Invalid unary operator {0!r}, valid operators '
'are {1}'.format(op, _unary_ops_syms))
def __call__(self, env):
operand = self.operand(env)
return self.func(operand)
def __unicode__(self):
return pprint_thing('{0}({1})'.format(self.op, self.operand))
@property
def return_type(self):
operand = self.operand
if operand.return_type == np.dtype('bool'):
return np.dtype('bool')
if (isinstance(operand, Op) and
(operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)):
return np.dtype('bool')
return np.dtype('int')
class MathCall(Op):
def __init__(self, func, args):
super(MathCall, self).__init__(func.name, args)
self.func = func
def __call__(self, env):
operands = [op(env) for op in self.operands]
with np.errstate(all='ignore'):
return self.func.func(*operands)
def __unicode__(self):
operands = map(str, self.operands)
return pprint_thing('{0}({1})'.format(self.op, ','.join(operands)))
class FuncNode(object):
def __init__(self, name):
if name not in _mathops:
raise ValueError(
"\"{0}\" is not a supported function".format(name))
self.name = name
self.func = getattr(np, name)
def __call__(self, *args):
return MathCall(self, args)
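# A minimal usage sketch: the Term/Op classes above are the machinery behind
# ``pd.eval``; running this module directly exercises one simple expression.
if __name__ == '__main__':
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    result = pd.eval('df.a + df.b')
    assert result.tolist() == [5, 7, 9]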
| bsd-3-clause |
macks22/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
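# Illustrative sanity check of the piecewise definition above: the loss equals
# (1 - z)**2 on -1 <= z < 1 and drops to exactly 0 once the margin z >= 1.
assert modified_huber_loss(np.array([1.0]), np.array([0.0]))[0] == 1.0
assert modified_huber_loss(np.array([1.0]), np.array([2.0]))[0] == 0.0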
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
gandalfcode/gandalf | tests/paper_tests/khitest.py | 1 | 4705 | #==============================================================================
# khitest.py
#==============================================================================
from gandalf.analysis.facade import *
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import rc
import time
rc('font', **{'family': 'serif', 'size' : 14})
# Set all plot limits
xmin = -0.5
xmax = 0.5
ymin = -0.5
ymax = 0.5
rhomin = 0.9
rhomax = 2.1
#loadsim('KHI-GRADH')
khi_gradh_sim = newsim('khi-gradh.dat')
setupsim()
run()
data0_0 = get_render_data('x', 'y', 'rho', sim=0, snap=5, res=256)
data0_1 = get_render_data('x', 'y', 'rho', sim=0, snap=10, res=256)
data0_2 = get_render_data('x', 'y', 'rho', sim=0, snap=15, res=256)
data0_3 = get_render_data('x', 'y', 'rho', sim=0, snap=20, res=256)
data0_4 = get_render_data('x', 'y', 'rho', sim=0, snap=25, res=256)
#loadsim('KHI-MFV-MOVING')
khi_mfv_sim = newsim('khi-mfv-moving.dat')
setupsim()
run()
data1_0 = get_render_data('x', 'y', 'rho', sim=1, snap=5, res=256)
data1_1 = get_render_data('x', 'y', 'rho', sim=1, snap=10, res=256)
data1_2 = get_render_data('x', 'y', 'rho', sim=1, snap=15, res=256)
data1_3 = get_render_data('x', 'y', 'rho', sim=1, snap=20, res=256)
data1_4 = get_render_data('x', 'y', 'rho', sim=1, snap=25, res=256)
#loadsim('KHI-MFV-MOVING')
khi_mfm_sim = newsim('khi-mfv-moving.dat')
khi_mfm_sim.SetParam('zero_mass_flux',1)
khi_mfm_sim.SetParam('run_id','KHI-MFM-MOVING')
setupsim()
run()
data2_0 = get_render_data('x', 'y', 'rho', sim=2, snap=5, res=256)
data2_1 = get_render_data('x', 'y', 'rho', sim=2, snap=10, res=256)
data2_2 = get_render_data('x', 'y', 'rho', sim=2, snap=15, res=256)
data2_3 = get_render_data('x', 'y', 'rho', sim=2, snap=20, res=256)
data2_4 = get_render_data('x', 'y', 'rho', sim=2, snap=25, res=256)
fig, ax = plt.subplots(3, 5, sharey='row', figsize=(15,9))
fig.subplots_adjust(hspace=0.01, wspace=0.01)
fig.subplots_adjust(bottom=0.0, top=1.0, left=0.0, right=1.0)
#fig = plt.figure(figsize=(10,3))
#fig.subplots_adjust(wspace=0.001,hspace=0.001)
#ax1.set_xlim([xmin, xmax])
#ax2.set_xlim([xmin, xmax])
#ax3.set_xlim([xmin, xmax])
#ax1.set_ylim([ymin, ymax])
#ax1 = fig.add_subplot(1,3,1,aspect='equal',xlim=[xmin,xmax],ylim=[ymin,ymax])
ax[0][0].text(14, 22, '(a) Grad-h SPH', color='white', size=14)
ax[0][0].set_axis_off()
ax[0][0].imshow(data0_0, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[0][1].set_axis_off()
ax[0][1].imshow(data0_1, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[0][2].set_axis_off()
ax[0][2].imshow(data0_2, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[0][3].set_axis_off()
ax[0][3].imshow(data0_3, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[0][4].set_axis_off()
ax[0][4].imshow(data0_4, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[1][0].text(14, 22, '(b) MFV', color='white', size=14)
ax[1][0].set_axis_off()
ax[1][0].imshow(data1_0, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[1][1].set_axis_off()
ax[1][1].imshow(data1_1, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[1][2].set_axis_off()
ax[1][2].imshow(data1_2, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[1][3].set_axis_off()
ax[1][3].imshow(data1_3, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[1][4].set_axis_off()
ax[1][4].imshow(data1_4, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[2][0].text(14, 22, '(c) MFM', color='white', size=14)
ax[2][0].set_axis_off()
ax[2][0].imshow(data2_0, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[2][1].set_axis_off()
ax[2][1].imshow(data2_1, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[2][2].set_axis_off()
ax[2][2].imshow(data2_2, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[2][3].set_axis_off()
ax[2][3].imshow(data2_3, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
ax[2][4].set_axis_off()
ax[2][4].imshow(data2_4, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
#ax2 = fig.add_subplot(1,3,2,aspect='equal',sharey=ax1,xlim=[xmin,xmax],ylim=[ymin,ymax])
#ax[1].set_axis_off()
#ax[1].imshow(data1, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
#ax[1].text(10, 22, '(b) MFV', color='white', size=14)
#ax3 = fig.add_subplot(1,3,3,aspect='equal',sharey=ax1,xlim=[xmin,xmax],ylim=[ymin,ymax])
#ax[2].set_axis_off()
#ax[2].imshow(data2, interpolation='nearest', cmap=cm.jet, vmin=rhomin, vmax=rhomax)
#ax[2].text(10, 22, '(c) MFM', color='white', size=14)
#fig.tight_layout()
plt.show()
fig.savefig('khi.pdf', dpi=100)
| gpl-2.0 |
tomasreimers/tensorflow-emscripten | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 5 | 55320 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import inspect
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
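# Hedged illustration (added; not part of the original module) of the
# conversion described in SCIKIT_DECOUPLE_INSTRUCTIONS above. The model_fn and
# the numpy arrays are placeholders supplied by the caller, not a real model.
def _example_scikit_decoupling(model_fn, x_train, y_train):
  est = SKCompat(Estimator(model_fn=model_fn))
  est.fit(x_train, y_train, batch_size=32, steps=100)
  return est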
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
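# Hedged usage sketch (added for illustration): wrapping in-memory numpy data
# with _get_input_fn. The shapes, batch size and epoch count are arbitrary
# assumptions; when a ready-made input_fn is passed instead, it is returned
# untouched together with feed_fn.
def _example_get_input_fn_usage():
  x = np.random.rand(100, 3).astype(np.float32)
  y = np.random.randint(0, 2, size=100)
  input_fn, feed_fn = _get_input_fn(
      x, y, input_fn=None, feed_fn=None, batch_size=32, shuffle=True, epochs=1)
  features, labels = input_fn()  # placeholders, fed batch-by-batch via feed_fn()
  return features, labels, feed_fn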
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
      features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
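# Hedged example (added for illustration): inferring dense real-valued
# FeatureColumns directly from a numpy matrix; the shape is an arbitrary
# assumption.
def _example_infer_columns():
  x = np.random.rand(10, 4)
  return infer_real_valued_columns_from_input(x)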
def _get_arguments(func):
"""Returns list of arguments this function has."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func).args
elif hasattr(func, '__call__'):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return _get_arguments(func.func)
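# Hedged sketch (added) of the first two branches above: a plain function and a
# callable object. The names used here are illustrative only.
def _example_get_arguments():
  def plain_model_fn(features, labels, mode):
    return features, labels, mode
  class CallableModel(object):
    def __call__(self, features, labels):
      return features, labels
  return (_get_arguments(plain_model_fn),   # ['features', 'labels', 'mode']
          _get_arguments(CallableModel()))  # ['self', 'features', 'labels']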
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
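# Hedged sketch (added): with a default RunConfig and no parameter servers the
# helper above returns None, so variables stay on the local device; the default
# construction below is an assumption about a typical single-machine setup.
def _example_replica_device_setter():
  config = run_config.RunConfig()
  return _get_replica_device_setter(config)  # None unless num_ps_replicas > 0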
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly names given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
      `features`, `labels`, or `predictions` provided. Typically this occurs
      when predictions are a dict but no pred_name is specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
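# Hedged illustration (added): the preferred MetricSpec form described above
# versus the deprecated bare-function form that the loop still accepts. The
# prediction key 'classes' is an assumption about the model's output dict;
# streaming_accuracy comes from tf.contrib.metrics.
def _example_metrics_dict(predictions_are_a_dict=True):
  if predictions_are_a_dict:
    return {'accuracy': metric_spec.MetricSpec(
        metric_fn=metrics_lib.streaming_accuracy,
        prediction_key='classes')}
  # Deprecated form, only valid when predictions is a single tensor; it is
  # applied directly as metric(predictions, labels).
  return {'accuracy': metrics_lib.streaming_accuracy}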
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
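# Hedged sketch (added for illustration): the kind of call the evaluation path
# below makes once metric values are available; the directory, metric names and
# step value are arbitrary assumptions.
def _example_write_dict_to_summary():
  _write_dict_to_summary(output_dir='/tmp/example_eval',
                         dictionary={'loss': 0.25, 'accuracy': 0.91},
                         current_global_step=100)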
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Concrete implementation of this class should provide the following functions:
* _get_train_ops
* _get_eval_ops
* _get_predict_ops
`Estimator` implemented below is a good example of how to use this class.
"""
__metaclass__ = abc.ABCMeta
  # Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
config: A RunConfig instance.
"""
# Model directory.
self._model_dir = model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time. Or when model is taking long time
to converge, and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
        Or if `metrics` is neither `None` nor a `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated_arg_values(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate. '
'input_fn (and in most cases, input_feature_key) will become required '
'args, and use_deprecated_input_fn will default to False and be removed '
'altogether.',
use_deprecated_input_fn=True,
input_fn=None)
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to
        the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
  @abc.abstractmethod
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
  @abc.abstractmethod
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
# The default return type of _get_eval_ops is ModelFnOps. But there are
# some subclasses of tf.contrib.learn.Estimator which override this
# method and use the legacy signature, namely _get_eval_ops returns an
# `eval_dict` dictionary of Tensors. The following else-statement code
# covers these cases, but will soon be deleted after the subclasses are
# updated.
# TODO(b/32664904): Update subclasses and delete the else-statement.
eval_ops = self._get_eval_ops(features, labels, metrics)
if isinstance(eval_ops, model_fn_lib.ModelFnOps): # Default signature
eval_dict = eval_ops.eval_metric_ops
else: # Legacy signature
eval_dict = eval_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
hooks = hooks or []
if feed_fn:
hooks.append(_FeedFnHook(feed_fn))
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._call_legacy_get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._call_legacy_get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
all_hooks.extend(hooks)
scaffold = model_fn_ops.training_scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self.config.tf_config) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
summary_io.SummaryWriterCache.clear()
return loss
def _call_legacy_get_predict_ops(self, features):
# The default return type of _get_predict_ops is ModelFnOps. But there are
# some subclasses of tf.contrib.learn.Estimator which override this
# method and use the legacy signature, namely _get_predict_ops returns a
# `predictions` Tensor or dict or Tensors. The following else-statement
# code covers these cases, but will soon be deleted after the subclasses
# are updated.
# TODO(b/32664904): Update subclasses and delete the else-statement.
infer_ops = self._get_predict_ops(features)
if isinstance(infer_ops, model_fn_lib.ModelFnOps): # Default signature
return infer_ops
return model_fn_lib.ModelFnOps(
mode=model_fn_lib.ModeKeys.INFER, predictions=infer_ops)
def _call_legacy_get_train_ops(self, features, labels):
train_ops = self._get_train_ops(features, labels)
if isinstance(train_ops, model_fn_lib.ModelFnOps): # Default signature
return train_ops
return model_fn_lib.ModelFnOps(
mode=model_fn_lib.ModeKeys.TRAIN,
predictions=None,
loss=train_ops[1],
train_op=train_ops[0])
def _identity_feature_engineering_fn(features, labels):
return features, labels
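# Hedged sketch (added for illustration): a non-identity feature_engineering_fn
# of the kind Estimator accepts below; the feature key 'x' is an assumption
# about what the input_fn produces.
def _example_feature_engineering_fn(features, labels):
  features['x_squared'] = features['x'] * features['x']
  return features, labels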
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
          * `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
               is passed to Estimator in `params` parameter. This allows
               the Estimator to be configured through hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
        * `model_dir`: Optional directory where model parameters, graph, etc.
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
        Supports the following signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _get_arguments(model_fn)
if params is not None and 'params' not in model_fn_args:
        raise ValueError('Estimator\'s model_fn (%s) does not include params '
                         'argument, but params (%s) were passed.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _get_arguments(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
    # Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
@experimental
def export_savedmodel(
self, export_dir_base, input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
exports_to_keep=None):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
exports_to_keep: Number of exports to keep.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
if input_fn is None:
raise ValueError('input_fn must be defined.')
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
# Call the input_fn and collect the input alternatives.
input_ops = input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
# Build the SignatureDefs from all pairs of input and output signatures
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
# Locate the latest checkpoint
# TODO(soergel): does it help that we know we have one from this step?
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
with tf_session.Session('') as session:
variables.initialize_local_variables()
data_flow_ops.tables_initializer()
saver_for_restore = saver.Saver(
variables.global_variables(),
sharded=True)
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
data_flow_ops.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
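# Hedged usage sketch (added for illustration; not part of the original file):
# a model_fn with the legacy (features, labels, mode) signature returning a
# (predictions, loss, train_op) tuple, which _call_model_fn wraps in
# ModelFnOps. The feature key 'x', the constant "model" and the no_op train
# step are placeholders, not a real training setup.
def _example_estimator_with_legacy_model_fn():
  from tensorflow.python.ops import math_ops
  def _toy_model_fn(features, labels, mode):
    del labels, mode  # unused in this sketch
    predictions = features['x'] * 2.0
    loss = math_ops.reduce_mean(predictions)
    train_op = control_flow_ops.no_op(name='toy_train_op')
    return predictions, loss, train_op
  est = Estimator(model_fn=_toy_model_fn)
  # est.fit(input_fn=..., steps=...) would train; a real model_fn should return
  # a train_op that also increments the global step.
  return est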
class _FeedFnHook(session_run_hook.SessionRunHook):
"""Runs feed_fn and sets the feed_dict accordingly."""
def __init__(self, feed_fn):
self.feed_fn = feed_fn
def before_run(self, run_context): # pylint: disable=unused-argument
return session_run_hook.SessionRunArgs(
fetches=None, feed_dict=self.feed_fn())
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [_FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
| apache-2.0 |
mxjl620/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    # Test the discretize using a noisy assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
            # noisy class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
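# Hedged sketch (added for illustration, not an actual test): the basic
# precomputed-affinity workflow exercised by the tests above, on a tiny
# block-structured similarity matrix chosen arbitrarily here.
def _example_precomputed_affinity_usage():
    S = np.array([[1.0, 1.0, 0.2],
                  [1.0, 1.0, 0.2],
                  [0.2, 0.2, 1.0]])
    model = SpectralClustering(n_clusters=2, affinity='precomputed',
                               random_state=0).fit(S)
    return model.labels_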
| bsd-3-clause |
cbertinato/pandas | pandas/core/indexes/datetimelike.py | 1 | 26175 | """
Base and utility classes for tseries type pandas objects.
"""
import operator
from typing import Set
import warnings
import numpy as np
from pandas._libs import NaT, iNaT, lib
from pandas._libs.algos import unique_deltas
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, cache_readonly, deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_int64, is_dtype_equal, is_float, is_integer, is_list_like,
is_period_dtype, is_scalar)
from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
from pandas.core import algorithms, ops
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import ExtensionOpsMixin
from pandas.core.arrays.datetimelike import (
DatetimeLikeArrayMixin, _ensure_datetimelike_to_i8)
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.tools.timedeltas import to_timedelta
import pandas.io.formats.printing as printing
from pandas.tseries.frequencies import to_offset
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
def ea_passthrough(array_method):
"""
Make an alias for a method of the underlying ExtensionArray.
Parameters
----------
array_method : method on an Array class
Returns
-------
method
"""
def method(self, *args, **kwargs):
return array_method(self._data, *args, **kwargs)
method.__name__ = array_method.__name__
method.__doc__ = array_method.__doc__
return method
class DatetimeIndexOpsMixin(ExtensionOpsMixin):
"""
    Common ops mixin to support a unified interface for datetime-like Index subclasses.
"""
_data = None
# DatetimeLikeArrayMixin assumes subclasses are mutable, so these are
# properties there. They can be made into cache_readonly for Index
# subclasses bc they are immutable
inferred_freq = cache_readonly(
DatetimeLikeArrayMixin.inferred_freq.fget) # type: ignore
_isnan = cache_readonly(DatetimeLikeArrayMixin._isnan.fget) # type: ignore
hasnans = cache_readonly(
DatetimeLikeArrayMixin._hasnans.fget) # type: ignore
_hasnans = hasnans # for index / array -agnostic code
_resolution = cache_readonly(
DatetimeLikeArrayMixin._resolution.fget) # type: ignore
resolution = cache_readonly(
DatetimeLikeArrayMixin.resolution.fget) # type: ignore
_maybe_mask_results = ea_passthrough(
DatetimeLikeArrayMixin._maybe_mask_results)
__iter__ = ea_passthrough(DatetimeLikeArrayMixin.__iter__)
mean = ea_passthrough(DatetimeLikeArrayMixin.mean)
@property
def freq(self):
"""
Return the frequency object if it is set, otherwise None.
"""
return self._data.freq
@freq.setter
def freq(self, value):
# validation is handled by _data setter
self._data.freq = value
@property
def freqstr(self):
"""
Return the frequency object as a string if it is set, otherwise None.
"""
return self._data.freqstr
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = self._data.unique()
# Note: if `self` is already unique, then self.unique() should share
# a `freq` with self. If not already unique, then self.freq must be
# None, so again sharing freq is correct.
return self._shallow_copy(result._data)
@classmethod
def _create_comparison_method(cls, op):
"""
Create a comparison method that dispatches to ``cls.values``.
"""
def wrapper(self, other):
if isinstance(other, ABCSeries):
# the arrays defer to Series for comparison ops but the indexes
# don't, so we have to unwrap here.
other = other._values
result = op(self._data, maybe_unwrap_index(other))
return result
wrapper.__doc__ = op.__doc__
wrapper.__name__ = '__{}__'.format(op.__name__)
return wrapper
@property
def _ndarray_values(self):
return self._data._ndarray_values
# ------------------------------------------------------------------------
# Abstract data attributes
@property
def values(self):
# Note: PeriodArray overrides this to return an ndarray of objects.
return self._data._data
@property # type: ignore # https://github.com/python/mypy/issues/1362
@Appender(DatetimeLikeArrayMixin.asi8.__doc__)
def asi8(self):
return self._data.asi8
# ------------------------------------------------------------------------
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, ABCIndexClass):
return False
elif not isinstance(other, type(self)):
try:
other = type(self)(other)
except Exception:
return False
if not is_dtype_equal(self.dtype, other.dtype):
# have different timezone
return False
elif is_period_dtype(self):
if not is_period_dtype(other):
return False
if self.freq != other.freq:
return False
return np.array_equal(self.asi8, other.asi8)
@staticmethod
def _join_i8_wrapper(joinf, dtype, with_indexers=True):
"""
Create the join wrapper methods.
"""
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
@staticmethod
def wrapper(left, right):
if isinstance(left, (np.ndarray, ABCIndex, ABCSeries,
DatetimeLikeArrayMixin)):
left = left.view('i8')
if isinstance(right, (np.ndarray, ABCIndex, ABCSeries,
DatetimeLikeArrayMixin)):
right = right.view('i8')
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view(dtype)
return join_index, left_indexer, right_indexer
return results
return wrapper
def _ensure_localized(self, arg, ambiguous='raise', nonexistent='raise',
from_utc=False):
# See DatetimeLikeArrayMixin._ensure_localized.__doc__
if getattr(self, 'tz', None):
# ensure_localized is only relevant for tz-aware DTI
result = self._data._ensure_localized(arg,
ambiguous=ambiguous,
nonexistent=nonexistent,
from_utc=from_utc)
return type(self)._simple_new(result, name=self.name)
return arg
def _box_values(self, values):
return self._data._box_values(values)
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def __contains__(self, key):
try:
res = self.get_loc(key)
return (is_scalar(res) or isinstance(res, slice) or
(is_list_like(res) and len(res)))
except (KeyError, TypeError, ValueError):
return False
contains = __contains__
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
def map(self, mapper, na_action=None):
try:
result = mapper(self)
# Try to use this result if we can
if isinstance(result, np.ndarray):
result = Index(result)
if not isinstance(result, Index):
raise TypeError('The map function must return an Index object')
return result
except Exception:
return self.astype(object).map(mapper)
def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index.
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self._ndarray_values)
attribs = self._get_attributes_dict()
freq = attribs['freq']
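            # Note (added): a reversed datetime/timedelta index is still regular, but its
            # frequency changes sign, so ``freq`` is flipped whenever the requested sort
            # order runs against it; PeriodIndex keeps its freq (see is_period_dtype check).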
if freq is not None and not is_period_dtype(self):
if freq.n > 0 and not ascending:
freq = freq * -1
elif freq.n < 0 and ascending:
freq = freq * -1
attribs['freq'] = freq
if not ascending:
sorted_values = sorted_values[::-1]
return self._simple_new(sorted_values, **attribs)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = ensure_int64(indices)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
taken = self._assert_take_fillable(self.asi8, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=iNaT)
# keep freq in PeriodArray/Index, reset otherwise
freq = self.freq if is_period_dtype(self) else None
return self._shallow_copy(taken, freq=freq)
_can_hold_na = True
_na_value = NaT
"""The expected NA value to use with this index."""
@property
def asobject(self):
"""
Return object Index which contains boxed values.
.. deprecated:: 0.23.0
Use ``astype(object)`` instead.
*this is an internal non-public method*
"""
warnings.warn("'asobject' is deprecated. Use 'astype(object)'"
" instead", FutureWarning, stacklevel=2)
return self.astype(object)
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
return tolerance
def tolist(self):
"""
Return a list of the underlying data.
"""
return list(self.astype(object))
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[0] != iNaT:
return self._box_func(i8[0])
if self.hasnans:
if skipna:
min_stamp = self[~self._isnan].asi8.min()
else:
return self._na_value
else:
min_stamp = i8.min()
return self._box_func(min_stamp)
except ValueError:
return self._na_value
def argmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin
"""
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = np.iinfo('int64').max
return i8.argmin()
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Index or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Series.max : Return the maximum value in a Series.
"""
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[-1] != iNaT:
return self._box_func(i8[-1])
if self.hasnans:
if skipna:
max_stamp = self[~self._isnan].asi8.max()
else:
return self._na_value
else:
max_stamp = i8.max()
return self._box_func(max_stamp)
except ValueError:
return self._na_value
def argmax(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the maximum values along an axis.
See `numpy.ndarray.argmax` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmax
"""
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = 0
return i8.argmax()
# --------------------------------------------------------------------
# Rendering Methods
def _format_with_header(self, header, na_rep='NaT', **kwargs):
return header + list(self._format_native_types(na_rep, **kwargs))
@property
def _formatter_func(self):
raise AbstractMethodError(self)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
attrs = super()._format_attrs()
for attrib in self._attributes:
if attrib == 'freq':
freq = self.freqstr
if freq is not None:
freq = "'%s'" % freq
attrs.append(('freq', freq))
return attrs
# --------------------------------------------------------------------
def _convert_scalar_indexer(self, key, kind=None):
"""
We don't allow integer or float indexing on datetime-like when using
loc.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# we don't allow integer/float indexing for loc
# we don't allow float indexing for ix/getitem
if is_scalar(key):
is_int = is_integer(key)
is_flt = is_float(key)
if kind in ['loc'] and (is_int or is_flt):
self._invalid_indexer('index', key)
elif kind in ['ix', 'getitem'] and is_flt:
self._invalid_indexer('index', key)
return super()._convert_scalar_indexer(key, kind=kind)
@classmethod
def _add_datetimelike_methods(cls):
"""
Add in the datetimelike methods (as we may have to override the
superclass).
"""
def __add__(self, other):
# dispatch to ExtensionArray implementation
result = self._data.__add__(maybe_unwrap_index(other))
return wrap_arithmetic_op(self, other, result)
cls.__add__ = __add__
def __radd__(self, other):
# alias for __add__
return self.__add__(other)
cls.__radd__ = __radd__
def __sub__(self, other):
# dispatch to ExtensionArray implementation
result = self._data.__sub__(maybe_unwrap_index(other))
return wrap_arithmetic_op(self, other, result)
cls.__sub__ = __sub__
def __rsub__(self, other):
result = self._data.__rsub__(maybe_unwrap_index(other))
return wrap_arithmetic_op(self, other, result)
cls.__rsub__ = __rsub__
def isin(self, values, level=None):
"""
Compute boolean array of whether each index value is found in the
passed set of values.
Parameters
----------
values : set or sequence of values
Returns
-------
is_contained : ndarray (boolean dtype)
"""
if level is not None:
self._validate_index_level(level)
if not isinstance(values, type(self)):
try:
values = type(self)(values)
except ValueError:
return self.astype(object).isin(values)
return algorithms.isin(self.asi8, values.asi8)
def intersection(self, other, sort=False):
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
if self.equals(other):
return self._get_reconciled_name_object(other)
if len(self) == 0:
return self.copy()
if len(other) == 0:
return other.copy()
if not isinstance(other, type(self)):
result = Index.intersection(self, other, sort=sort)
if isinstance(result, type(self)):
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
elif (other.freq is None or self.freq is None or
other.freq != self.freq or
not other.freq.isAnchored() or
(not self.is_monotonic or not other.is_monotonic)):
result = Index.intersection(self, other, sort=sort)
# Invalidate the freq of `result`, which may not be correct at
# this point, depending on the values.
result.freq = None
if hasattr(self, 'tz'):
result = self._shallow_copy(result._values, name=result.name,
tz=result.tz, freq=None)
else:
result = self._shallow_copy(result._values, name=result.name,
freq=None)
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
# after sorting, the intersection always starts with the right index
        # and ends with whichever index has the smaller last element
end = min(left[-1], right[-1])
start = right[0]
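        # Worked example (added): if left spans [09:00, 12:00] and right spans
        # [10:00, 14:00], then start = 10:00 and end = 12:00, and the slice below pulls
        # the overlapping block directly out of ``left``.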
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
@Appender(_index_shared_docs['repeat'] % _index_doc_kwargs)
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
freq = self.freq if is_period_dtype(self) else None
return self._shallow_copy(self.asi8.repeat(repeats), freq=freq)
@Appender(_index_shared_docs['where'] % _index_doc_kwargs)
def where(self, cond, other=None):
other = _ensure_datetimelike_to_i8(other, to_utc=True)
values = _ensure_datetimelike_to_i8(self, to_utc=True)
result = np.where(cond, values, other).astype('i8')
result = self._ensure_localized(result, from_utc=True)
return self._shallow_copy(result)
def _summary(self, name=None):
"""
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
formatter = self._formatter_func
if len(self) > 0:
index_summary = ', %s to %s' % (formatter(self[0]),
formatter(self[-1]))
else:
index_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (printing.pprint_thing(name),
len(self), index_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
# display as values, not quoted
result = result.replace("'", "")
return result
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class.
"""
attribs = self._get_attributes_dict()
attribs['name'] = name
# do not pass tz to set because tzlocal cannot be hashed
if len({str(x.dtype) for x in to_concat}) != 1:
raise ValueError('to_concat must have the same tz')
new_data = type(self._values)._concat_same_type(to_concat).asi8
# GH 3232: If the concat result is evenly spaced, we can retain the
# original frequency
is_diff_evenly_spaced = len(unique_deltas(new_data)) == 1
if not is_period_dtype(self) and not is_diff_evenly_spaced:
# reset freq
attribs['freq'] = None
return self._simple_new(new_data, **attribs)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype) and copy is False:
# Ensure that self.astype(self.dtype) is self
return self
new_values = self._data.astype(dtype, copy=copy)
# pass copy=False because any copying will be done in the
# _data.astype call above
return Index(new_values,
dtype=new_values.dtype, name=self.name, copy=False)
@deprecate_kwarg(old_arg_name='n', new_arg_name='periods')
def shift(self, periods, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int
Number of periods (or increments) to shift by,
can be positive or negative.
.. versionchanged:: 0.24.0
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
"""
result = self._data._time_shift(periods, freq=freq)
return type(self)(result, name=self.name)
def wrap_arithmetic_op(self, other, result):
if result is NotImplemented:
return NotImplemented
if isinstance(result, tuple):
# divmod, rdivmod
assert len(result) == 2
return (wrap_arithmetic_op(self, other, result[0]),
wrap_arithmetic_op(self, other, result[1]))
if not isinstance(result, Index):
# Index.__new__ will choose appropriate subclass for dtype
result = Index(result)
res_name = ops.get_op_result_name(self, other)
result.name = res_name
return result
def maybe_unwrap_index(obj):
"""
If operating against another Index object, we need to unwrap the underlying
data before deferring to the DatetimeArray/TimedeltaArray/PeriodArray
implementation, otherwise we will incorrectly return NotImplemented.
Parameters
----------
obj : object
Returns
-------
unwrapped object
"""
if isinstance(obj, ABCIndexClass):
return obj._data
return obj
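# Minimal sketch (added, not part of pandas): how the two helpers above combine when a
# dunder op is dispatched to the backing array; ``idx`` and ``other`` are placeholders.
def _example_dispatch(idx, other):
    # Unwrap Index arguments so the ExtensionArray op sees raw array data, then re-wrap
    # the array result in an Index with the usual name propagation.
    result = idx._data.__add__(maybe_unwrap_index(other))
    return wrap_arithmetic_op(idx, other, result)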
class DatetimelikeDelegateMixin(PandasDelegate):
"""
Delegation mechanism, specific for Datetime, Timedelta, and Period types.
Functionality is delegated from the Index class to an Array class. A
few things can be customized
* _delegate_class : type
The class being delegated to.
    * _delegated_methods, delegated_properties : List
        The list of property / method names being delegated.
    * raw_methods : Set
        The set of methods whose results should *not* be
        boxed in an index, after being returned from the array
    * raw_properties : Set
        The set of properties whose results should *not* be
        boxed in an index, after being returned from the array
"""
# raw_methods : dispatch methods that shouldn't be boxed in an Index
_raw_methods = set() # type: Set[str]
# raw_properties : dispatch properties that shouldn't be boxed in an Index
_raw_properties = set() # type: Set[str]
name = None
_data = None
@property
def _delegate_class(self):
raise AbstractMethodError
def _delegate_property_get(self, name, *args, **kwargs):
result = getattr(self._data, name)
if name not in self._raw_properties:
result = Index(result, name=self.name)
return result
def _delegate_property_set(self, name, value, *args, **kwargs):
setattr(self._data, name, value)
def _delegate_method(self, name, *args, **kwargs):
result = operator.methodcaller(name, *args, **kwargs)(self._data)
if name not in self._raw_methods:
result = Index(result, name=self.name)
return result
| bsd-3-clause |
nwjs/chromium.src | chrome/test/data/nacl/gdb_rsp.py | 42 | 2542 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is based on gdb_rsp.py file from NaCl repository.
import re
import socket
import time
def RspChecksum(data):
checksum = 0
for char in data:
checksum = (checksum + ord(char)) % 0x100
return checksum
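# Illustrative sketch (added, not part of the original file): how a packet is framed
# with this checksum; the payload below is an arbitrary example value.
def ExampleRspFrame(data='qSupported'):
  # An RSP packet is '$' + payload + '#' + two hex digits of the mod-256 payload sum.
  return '$%s#%02x' % (data, RspChecksum(data))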
class EofOnReplyException(Exception):
pass
class GdbRspConnection(object):
def __init__(self, addr):
self._socket = self._Connect(addr)
def _Connect(self, addr):
# We have to poll because we do not know when sel_ldr has
# successfully done bind() on the TCP port. This is inherently
# unreliable.
# TODO(mseaborn): Add a more reliable connection mechanism to
# sel_ldr's debug stub.
timeout_in_seconds = 10
poll_time_in_seconds = 0.1
for i in xrange(int(timeout_in_seconds / poll_time_in_seconds)):
# On Mac OS X, we have to create a new socket FD for each retry.
sock = socket.socket()
try:
sock.connect(addr)
except socket.error:
# Retry after a delay.
time.sleep(poll_time_in_seconds)
else:
return sock
raise Exception('Could not connect to sel_ldr\'s debug stub in %i seconds'
% timeout_in_seconds)
def _GetReply(self):
reply = ''
while True:
data = self._socket.recv(1024)
if len(data) == 0:
if reply == '+':
raise EofOnReplyException()
raise AssertionError('EOF on socket reached with '
'incomplete reply message: %r' % reply)
reply += data
if '#' in data:
break
    match = re.match(r'\+\$([^#]*)#([0-9a-fA-F]{2})$', reply)
if match is None:
raise AssertionError('Unexpected reply message: %r' % reply)
reply_body = match.group(1)
checksum = match.group(2)
expected_checksum = '%02x' % RspChecksum(reply_body)
if checksum != expected_checksum:
raise AssertionError('Bad RSP checksum: %r != %r' %
(checksum, expected_checksum))
# Send acknowledgement.
self._socket.send('+')
return reply_body
# Send an rsp message, but don't wait for or expect a reply.
def RspSendOnly(self, data):
msg = '$%s#%02x' % (data, RspChecksum(data))
return self._socket.send(msg)
def RspRequest(self, data):
self.RspSendOnly(data)
return self._GetReply()
def RspInterrupt(self):
self._socket.send('\x03')
return self._GetReply()
| bsd-3-clause |
waterponey/scikit-learn | sklearn/datasets/species_distributions.py | 13 | 7866 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
    also known as the Forest Small Rice Rat, a rodent that lives in
    Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.datasets.base import _pkl_filepath
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = DIRECTORY_URL + "samples.zip"
COVERAGES_URL = DIRECTORY_URL + "coverages.zip"
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b'NODATA_value'])
if nodata != -9999:
        M[M == nodata] = -9999  # remap the file's nodata marker to the -9999 sentinel
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
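# Usage sketch (added): the grids are typically built from the Bunch returned by
# fetch_species_distributions() below, e.g.
#   batch = fetch_species_distributions()
#   xgrid, ygrid = construct_grids(batch)
#   X, Y = np.meshgrid(xgrid, ygrid[::-1])   # mesh matching batch.coverages[i]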
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in
      Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
| bsd-3-clause |
hpssjellis/deeplearnjs-javascript-examples | Unordered-tensorflow-examples/aymericdamien-Examples/examples/linear_regression.py | 7 | 2600 | '''
A linear regression learning algorithm example using the TensorFlow library.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random
# Parameters
learning_rate = 0.01
training_epochs = 2000
display_step = 50
# Training Data
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Create Model
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
activation = tf.add(tf.mul(X, W), b)
# Minimize the squared errors
cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
#Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
"W=", sess.run(W), "b=", sess.run(b)
print "Optimization Finished!"
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print "Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n'
# Testing example, as requested (Issue #2)
test_X = numpy.asarray([6.83,4.668,8.9,7.91,5.7,8.7,3.1,2.1])
test_Y = numpy.asarray([1.84,2.273,3.2,2.831,2.92,3.24,1.35,1.03])
print "Testing... (L2 loss Comparison)"
testing_cost = sess.run(tf.reduce_sum(tf.pow(activation-Y, 2))/(2*test_X.shape[0]),
feed_dict={X: test_X, Y: test_Y}) #same function as cost above
print "Testing cost=", testing_cost
print "Absolute l2 loss difference:", abs(training_cost - testing_cost)
#Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(test_X, test_Y, 'bo', label='Testing data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show() | mit |
ingmarschuster/ModelSelection | modsel/scribblings/eval_is_si.py | 1 | 4431 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 6 09:47:47 2015
@author: Ingmar Schuster
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy as sp
import scipy.stats as stats
from numpy import exp, log, sqrt
from scipy.misc import logsumexp
from numpy.linalg import inv
import modsel.estimator_statistics as es
import cPickle as pick
from copy import copy
import matplotlib as mpl
from modsel.evidence import evidence_from_importance_weights
import matplotlib.pyplot as plt
def plot_var_bias_mse(res, outfname = "plot.pdf", ylabel_pre = ""):
ssize = sorted(res.keys())
st = res[ssize[0]].keys()
st_abs = []
st_rel = []
for s in st:
if s.endswith("(relat)"):
continue
st_rel.append(s)
else:
st_abs.append(s)
st_abs.sort()
st_rel.sort()
st = copy(st_abs)
#st.extend(st_rel)
estimators = res[ssize[0]][st[0]].keys()
fig, axes = plt.subplots(ncols=max(len(st_abs), len(st_rel)), nrows = 1, figsize=(9,3))
for i in range(len(st)):
m = st[i]
a = axes.flat[i]
for e in estimators:
x = np.log(sorted(res.keys()))
y = np.array([res[i][m][e] for i in ssize]).flatten()
#assert()
a.plot(x, y, label=e)
a.set_title("")
a.set_xlabel("log # lhood evals")
a.set_ylabel(ylabel_pre+m)
a.autoscale("both")
a.set_aspect("auto", adjustable="datalim")
lgd = axes[len(st_abs)-1].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#fig.suptitle(title + "; dim=" + str(dims))
# fig.figure(num=1, figsize=(1,3))
fig.tight_layout()
fig.savefig(outfname, bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
# {"post":post_param, "prop":prop_param,
# "perm_x":perm_x,
# "log_importance_weights":perm_weights,
# "M": M, "K":K,
# "log_evid":log_evid }
def estim_stats_progression(samp, log_weights, true_exp, true_ev = None, norm_w = True, steps = 10):
idx_norm = np.round(np.logspace(1, np.log10(samp[0].shape[0]), steps, base=10)).astype(int)
idx_infl = np.round(np.logspace(1, np.log10(np.vstack(samp).shape[0]), steps, base=10)).astype(int)
est = {}
ev_est = {}
for i in range(steps):
est[idx_norm[i]] = {"Standard":estim(samp[0][:idx_norm[i]], log_weights[0][:idx_norm[i]], norm_w = norm_w),
"Inflation": estim(np.vstack(samp)[:idx_infl[i]], np.hstack(log_weights)[:idx_infl[i]], norm_w = norm_w),
"GroundTruth": true_exp}
ev_est[idx_norm[i]] = {"Standard": np.atleast_2d(log_weights[0][:idx_norm[i]]).T,
"Inflation": np.atleast_2d(np.hstack(log_weights)[:idx_infl[i]]).T,
"GroundTruth": true_ev}
return (est, ev_est)
def construct_long_run(samp, log_weights):
    # Assumed completion (the original body was left unfinished): stack the per-run
    # samples and log-weights into one long run, mirroring the np.vstack/np.hstack
    # treatment used in estim_stats_progression above.
    rval_samp = np.vstack(samp)
    rval_lw = np.hstack(log_weights)
    return (rval_samp, rval_lw)
def estim(samp, log_weights, norm_w = True):
if norm_w is True:
log_weights = log_weights - logsumexp(log_weights)
(lsamp, lsamp_sign) = es.log_sign(samp)
(lws, lws_sign) = es.logaddexp(lsamp, np.atleast_2d(log_weights).T, lsamp_sign)
return es.exp_sign(lws, lws_sign).mean(0)
# {"post":post_param, "prop":prop_param,
# "perm_x":perm_x,
# "log_importance_weights":perm_weights,
# "M": M, "K":K,
# "log_evid":log_evid }
def plot(fname, num_runs = 100):
with open(fname, "r") as f:
res = pick.load(f)
perm_x = np.hstack(res["perm_x"][:num_runs]) # stack up to long run
liw = np.hstack(res["log_importance_weights"][:num_runs]) # stack up to long run
std_ss = perm_x[0].shape[0]
infl_ss = len(perm_x)*perm_x[0].shape[0]
print("Standard IS:", std_ss, "samples, Inflated:", infl_ss)
added = "__is_"+str(std_ss)+"_-_issi_"+str(infl_ss)+"_post"+str(res["post"][0])+"_prop"+str(res["prop"][0])+"_M"+str(res["M"])+"_K"+str(res["K"])+"_logevid"+str(res["log_evid"])
print(fname+added)
#return
(s, ev_s) = estim_stats_progression(perm_x, liw, res["post"][0], np.atleast_2d(res["log_evid"]))
s_stat = es.statistics(s)
ev_stat = es.logstatistics(ev_s)
#assert()
plot_var_bias_mse(s_stat, outfname = fname+added+".pdf")
plot_var_bias_mse(ev_stat, outfname = fname+added+"_evidence.pdf", ylabel_pre="log ")
| gpl-3.0 |
artwr/airflow | airflow/contrib/plugins/metastore_browser/main.py | 3 | 6084 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_appbuilder import BaseView, expose
import pandas as pd
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www.decorators import gzipped
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a Flask-AppBuilder BaseView
class MetastoreBrowserView(BaseView):
default_view = 'index'
@expose('/')
def index(self):
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
df.db = (
'<a href="/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render_template(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
table_name = request.args.get("table")
m = HiveMetastoreHook(METASTORE_CONN_ID)
table = m.get_table(table_name)
return self.render_template(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
db = request.args.get("db")
m = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
return self.render_template(
"metastore_browser/db.html", tables=tables, db=db)
@gzipped
@expose('/partitions/')
def partitions(self):
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@gzipped
@expose('/objects/')
def objects(self):
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
d = [
{'id': row[0], 'text': row[0]}
for row in h.get_records(sql)]
return json.dumps(d)
@gzipped
@expose('/data/')
def data(self):
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
h = PrestoHook(PRESTO_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
h = HiveCliHook(HIVE_CLI_CONN_ID)
return h.run_cli(sql)
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
name = "metastore_browser"
flask_blueprints = [bp]
appbuilder_views = [{"name": "Hive Metadata Browser",
"category": "Plugins",
"view": MetastoreBrowserView()}]
| apache-2.0 |
lukauskas/scipy | doc/source/tutorial/stats/plots/kde_plot4.py | 142 | 1457 | from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def my_kde_bandwidth(obj, fac=1./5):
"""We use Scott's Rule, multiplied by a constant factor."""
return np.power(obj.n, -1./(obj.d+4)) * fac
loc1, scale1, size1 = (-2, 1, 175)
loc2, scale2, size2 = (2, 0.2, 50)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
np.random.normal(loc=loc2, scale=scale2, size=size2)])
x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)
kde = stats.gaussian_kde(x2)
kde2 = stats.gaussian_kde(x2, bw_method='silverman')
kde3 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.2))
kde4 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.5))
pdf = stats.norm.pdf
bimodal_pdf = pdf(x_eval, loc=loc1, scale=scale1) * float(size1) / x2.size + \
pdf(x_eval, loc=loc2, scale=scale2) * float(size2) / x2.size
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.plot(x2, np.zeros(x2.shape), 'b+', ms=12)
ax.plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
ax.plot(x_eval, kde2(x_eval), 'b-', label="Silverman's Rule")
ax.plot(x_eval, kde3(x_eval), 'g-', label="Scott * 0.2")
ax.plot(x_eval, kde4(x_eval), 'c-', label="Scott * 0.5")
ax.plot(x_eval, bimodal_pdf, 'r--', label="Actual PDF")
ax.set_xlim([x_eval.min(), x_eval.max()])
ax.legend(loc=2)
ax.set_xlabel('x')
ax.set_ylabel('Density')
plt.show()
| bsd-3-clause |
neilhan/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/in_memory_source_test.py | 30 | 3738 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NumpySource and PandasSource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class NumpySourceTestCase(tf.test.TestCase):
def testNumpySource(self):
batch_size = 3
iterations = 1000
array = np.arange(32).reshape([16, 2])
numpy_source = in_memory_source.NumpySource(array, batch_size=batch_size)
index_column = numpy_source().index
value_column = numpy_source().value
cache = {}
with tf.Graph().as_default():
value_tensor = value_column.build(cache)
index_tensor = index_column.build(cache)
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
expected_index = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_value = get_rows(array, expected_index)
actual_index, actual_value = sess.run([index_tensor, value_tensor])
np.testing.assert_array_equal(expected_index, actual_index)
np.testing.assert_array_equal(expected_value, actual_value)
coord.request_stop()
coord.join(threads)
class PandasSourceTestCase(tf.test.TestCase):
def testPandasFeeding(self):
if not HAS_PANDAS:
return
batch_size = 3
iterations = 1000
index = np.arange(100, 132)
a = np.arange(32)
b = np.arange(32, 64)
dataframe = pd.DataFrame({"a": a, "b": b}, index=index)
pandas_source = in_memory_source.PandasSource(dataframe,
batch_size=batch_size)
pandas_columns = pandas_source()
cache = {}
with tf.Graph().as_default():
pandas_tensors = [col.build(cache) for col in pandas_columns]
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
indices = [j % dataframe.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))]
expected_df_indices = dataframe.index[indices]
expected_rows = dataframe.iloc[indices]
actual_value = sess.run(pandas_tensors)
np.testing.assert_array_equal(expected_df_indices, actual_value[0])
for col_num, col in enumerate(dataframe.columns):
np.testing.assert_array_equal(expected_rows[col].values,
actual_value[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
brycefrank/pyfor | pyfor/rasterizer.py | 1 | 12374 | # Functions for rasterizing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pyfor import gisexport
import pyfor.metrics
class Grid:
"""The Grid object is a representation of a point cloud that has been sorted into X and Y dimensional bins. From \
the Grid object we can derive other useful products, most importantly, :class:`.Raster` objects.
"""
def __init__(self, cloud, cell_size):
"""
        Upon initialization, the parent cloud object's :attr:`data.points` attribute is sorted into bins in place. The
        columns 'bins_x' and 'bins_y' are appended. Other useful information, such as the resolution and the number of \
        rows and columns, is also stored.
:param cloud: The "parent" cloud object.
:param cell_size: The size of the cell for sorting in the units of the input cloud object.
"""
self.cloud = cloud
self.cell_size = cell_size
min_x, max_x = self.cloud.data.min[0], self.cloud.data.max[0]
min_y, max_y = self.cloud.data.min[1], self.cloud.data.max[1]
self.m = int(np.ceil((max_y - min_y) / cell_size))
self.n = int(np.ceil((max_x - min_x) / cell_size))
self.cloud.data.points.loc[:, "bins_x"] = (np.floor((self.cloud.data.points["x"].values - min_x) / self.cell_size)).astype(np.int)
self.cloud.data.points.loc[:, "bins_y"] = (np.floor((max_y - self.cloud.data.points["y"].values) / self.cell_size)).astype(np.int)
self.cells = self.cloud.data.points.groupby(["bins_x", "bins_y"])
def _update(self):
self.cloud.data._update()
self.__init__(self.cloud, self.cell_size)
def raster(self, func, dim, **kwargs):
"""
Generates an m x n matrix with values as calculated for each cell in func. This is a raw array without \
missing cells interpolated. See self.interpolate for interpolation methods.
:param func: A function string, i.e. "max" or a function itself, i.e. :func:`np.max`. This function must be \
able to take a 1D array of the given dimension as an input and produce a single value as an output. This \
single value will become the value of each cell in the array.
:param dim: A dimension to calculate on.
:return: A 2D numpy array where the value of each cell is the result of the passed function.
"""
bin_summary = self.cells.agg({dim: func}, **kwargs).reset_index()
array = np.full((self.m, self.n), np.nan)
array[bin_summary["bins_y"], bin_summary["bins_x"]] = bin_summary[dim]
return Raster(array, self)
@property
def empty_cells(self):
"""
        Retrieves the cells with no returns in self.data.
        :return: An N x 2 numpy array where each row corresponds to the [y x] coordinate of an empty cell.
"""
array = self.raster("count", "z").array
emptys = np.argwhere(np.isnan(array))
return emptys
def interpolate(self, func, dim, interp_method="nearest"):
"""
Interpolates missing cells in the grid. This function uses scipy.griddata as a backend. Please see \
documentation for that function for more details.
:param func: The function (or function string) to calculate an array on the gridded data.
:param dim: The dimension (i.e. column name of self.cells) to cast func onto.
:param interp_method: The interpolation method call for scipy.griddata, one of any: "nearest", "cubic", \
"linear"
:return: An interpolated array.
"""
from scipy.interpolate import griddata
# Get points and values that we already have
cell_values = self.cells[dim].agg(func).reset_index()
points = cell_values[["bins_x", "bins_y"]].values
values = cell_values[dim].values
X, Y = np.mgrid[1 : self.n + 1, 1 : self.m + 1]
# TODO generally a slow approach
interp_grid = griddata(points, values, (X, Y), method=interp_method).T
return Raster(interp_grid, self)
def metrics(self, func_dict, as_raster=False):
"""
Calculates summary statistics for each grid cell in the Grid.
:param func_dict: A dictionary containing keys corresponding to the columns of self.data and values that \
correspond to the functions to be called on those columns.
:return: A pandas dataframe with the aggregated metrics.
"""
# Aggregate on the function
aggregate = self.cells.agg(func_dict)
if as_raster == False:
return aggregate
else:
rasters = []
for column in aggregate:
array = np.asarray(
aggregate[column].reset_index().pivot("bins_y", "bins_x")
)
raster = Raster(array, self)
rasters.append(raster)
# Get list of dimension names
dims = [tup[0] for tup in list(aggregate)]
# Get list of metric names
metrics = [tup[1] for tup in list(aggregate)]
return pd.DataFrame(
{"dim": dims, "metric": metrics, "raster": rasters}
).set_index(["dim", "metric"])
def standard_metrics(self, heightbreak=0):
return pyfor.metrics.standard_metrics_grid(self, heightbreak=heightbreak)
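# Usage sketch (added; the file name and cell size are assumptions, and the Cloud loader
# is assumed to be the standard pyfor.cloud.Cloud):
#   cloud = pyfor.cloud.Cloud("tile.las")
#   grid = Grid(cloud, cell_size=0.5)
#   chm = grid.interpolate("max", "z")   # CHM-style surface with empty cells filled
#   chm.pit_filter(kernel_size=3)
#   chm.write("chm.tif")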
class ImportedGrid(Grid):
"""
ImportedGrid is used to normalize a parent cloud object with an arbitrary raster file.
"""
def __init__(self, path, cloud):
import rasterio
self.in_raster = rasterio.open(path)
# Check cell size
cell_size_x, cell_size_y = (
self.in_raster.transform[0],
abs(self.in_raster.transform[4]),
)
if cell_size_x != cell_size_y:
print("Cell sizes not equal of input raster, not supported.")
raise ValueError
else:
cell_size = cell_size_x
self.cloud = cloud
self.cell_size = cell_size
min_x, max_x = self.in_raster.bounds[0], self.in_raster.bounds[2]
min_y, max_y = self.in_raster.bounds[1], self.in_raster.bounds[3]
self.m = self.in_raster.height
self.n = self.in_raster.width
# Create bins
bins_x = np.searchsorted(
np.linspace(min_x, max_x, self.n), self.cloud.data.points["x"]
)
bins_y = np.searchsorted(
np.linspace(min_y, max_y, self.m), self.cloud.data.points["y"]
)
self.cloud.data.points["bins_x"] = bins_x
self.cloud.data.points["bins_y"] = bins_y
self.cells = self.cloud.data.points.groupby(["bins_x", "bins_y"])
def _update(self):
self.cloud.data._update()
class Raster:
def __init__(self, array, grid):
from rasterio.transform import from_origin
self.grid = grid
self.cell_size = self.grid.cell_size
self.array = array
self._affine = from_origin(
self.grid.cloud.data.min[0],
self.grid.cloud.data.max[1],
self.grid.cell_size,
self.grid.cell_size,
)
@classmethod
def from_rasterio(cls):
pass
def force_extent(self, bbox):
"""
Sets `self._affine` and `self.array` to a forced bounding box. Useful for trimming edges off of rasters when
processing buffered tiles. This operation is done in place.
:param bbox: Coordinates of output raster as a tuple (min_x, max_x, min_y, max_y)
"""
from rasterio.transform import from_origin
new_left, new_right, new_bot, new_top = bbox
m, n = self.array.shape[0], self.array.shape[1]
# Maniupulate the array to fit the new affine transformation
old_left, old_top = self.grid.cloud.data.min[0], self.grid.cloud.data.max[1]
old_right, old_bot = (
old_left + n * self.grid.cell_size,
old_top - m * self.grid.cell_size,
)
left_diff, top_diff, right_diff, bot_diff = (
old_left - new_left,
old_top - new_top,
old_right - new_right,
old_bot - new_bot,
)
left_diff, top_diff, right_diff, bot_diff = (
int(np.rint(left_diff / self.cell_size)),
int(np.rint(top_diff / self.cell_size)),
int(np.rint(right_diff / self.cell_size)),
int(np.rint(bot_diff / self.cell_size)),
)
if left_diff > 0:
# bbox left is outside of raster left, we need to add columns of nans
emptys = np.empty((m, left_diff))
emptys[:] = np.nan
self.array = np.insert(self.array, 0, np.transpose(emptys), axis=1)
elif left_diff != 0:
# bbox left is inside of raster left, we need to remove left diff columns
self.array = self.array[:, abs(left_diff) :]
if top_diff < 0:
# bbox top is outside of raster top, we need to add rows of nans
emptys = np.empty((abs(top_diff), self.array.shape[1]))
emptys[:] = np.nan
self.array = np.insert(self.array, 0, emptys, axis=0)
elif top_diff != 0:
# bbox top is inside of raster top, we need to remove rows of nans
self.array = self.array[abs(top_diff) :, :]
if right_diff < 0:
# bbox right is outside of raster right, we need to add columns of nans
emptys = np.empty((self.array.shape[0], abs(right_diff)))
emptys[:] = np.nan
self.array = np.append(self.array, emptys, axis=1)
elif right_diff != 0:
# bbox right is inside raster right, we need to remove columns
self.array = self.array[:, :-right_diff]
if bot_diff > 0:
# bbox bottom is outside of raster bottom, we need to add rows of nans
emptys = np.empty((abs(bot_diff), self.array.shape[1]))
emptys[:] = np.nan
self.array = np.append(self.array, emptys, axis=0)
elif bot_diff != 0:
# bbox bottom is inside of raster bottom, we need to remove columns
self.array = self.array[:bot_diff, :]
# Handle the affine transformation
new_affine = from_origin(
new_left, new_top, self.grid.cell_size, self.grid.cell_size
)
self._affine = new_affine
def plot(self, cmap="viridis", block=False, return_plot=False):
"""
Default plotting method for the Raster object.
"""
# TODO implement cmap
fig = plt.figure()
ax = fig.add_subplot(111)
caz = ax.matshow(self.array)
fig.colorbar(caz)
ax.xaxis.tick_bottom()
ax.set_xticks(np.linspace(0, self.grid.n, 3))
ax.set_yticks(np.flip(np.linspace(0, self.grid.m, 3)))
x_ticks, y_ticks = (
np.rint(
np.linspace(self.grid.cloud.data.min[0], self.grid.cloud.data.max[0], 3)
),
np.rint(
np.linspace(self.grid.cloud.data.min[1], self.grid.cloud.data.max[1], 3)
),
)
ax.set_xticklabels(x_ticks)
ax.set_yticklabels(y_ticks)
if return_plot == True:
return ax
else:
plt.show(block=block)
def pit_filter(self, kernel_size):
"""
        Filters pits in the raster. Intended for use with canopy height models (i.e. grid(0.5).interpolate("max", "z")).
This function modifies the raster array **in place**.
:param kernel_size: The size of the kernel window to pass over the array. For example 3 -> 3x3 kernel window.
"""
from scipy.signal import medfilt2d
self.array = medfilt2d(self.array, kernel_size=kernel_size)
def write(self, path):
"""
Writes the raster to a geotiff. Requires the Cloud.crs attribute to be filled by a projection string (ideally \
wkt or proj4).
:param path: The path to write to.
"""
if not self.grid.cloud.crs:
from warnings import warn
warn(
"No coordinate reference system defined. Please set the .crs attribute of the Cloud object.",
UserWarning,
)
gisexport.array_to_raster(self.array, self._affine, self.grid.cloud.crs, path)
| mit |
howthebodyworks/mlmistrels | src/encoder.py | 1 | 3657 | #!/bin/local/bin/python3
# A utility, script, or potentially one day a class to convert audio waveforms into windowed, reduced descriptors, for some machine learning algorithm to go nuts on later
# Authors: James Nichols, Darwin Vickers
# Includes a test of converting then reversing the predictor to see how things sound. Uses Librosa extensively.
import librosa
import matplotlib.pyplot as plt
#import seaborn as sns
import numpy as np
from util import compress
from reconstruct import reconstruct
def randomise_phase(D):
""" A function that takes reals of any and randomises all the phases,
it does so by randomly modifying the angle of a complex number """
# Create a univariate matrix, use the euler identity to make
# uniformly distributed complex numbers of arg 1
rands = np.exp(np.vectorize(complex)(0, 2.0 * np.pi * np.random.random(D.shape)))
return D * rands
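# Note (added): the multiplication above leaves |D| untouched, so decoding keeps the
# magnitude spectrogram and only scrambles the phase information.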
class Encode(object):
def __init__(self, win_len = 2048):
self.win_len = win_len
def encode(self, sound):
return sound
def decode(self, A):
return A
class SparseEncode(Encode):
""" An encoder that uses sparse tensor representation of the spectrogram """
def __init__(self, win_len = 2048, n_decomp = 4):
import sklearn.decomposition
self.win_len = win_len
self.n_decomp = n_decomp
self.T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=self.n_decomp)
def encode(self, sound):
D = librosa.stft(sound, n_fft=self.win_len)
self.comps, A = librosa.decompose.decompose(np.abs(D), transformer=self.T, sort=True)
return A
def decode(self, A):
return librosa.istft(randomise_phase(self.comps.dot(A)))
class BinEncode(Encode):
def __init__(self, win_len = 2048, n_bins = 32):
self.win_len = win_len
self.n_bins = 32
self.bin_size = self.win_len // (2 * self.n_bins)
def encode(self, sound):
D = librosa.stft(sound, n_fft=self.win_len)
# Make the time series of predictors
A = np.zeros([self.n_bins+1, D.shape[1]], dtype=np.complex)
# Try bin-power
for t in range(D.shape[1]):
# Consider the 0 Hz component separately (Maybe get rid of this...?)
A[0, t] = D[0, t]
            # Simple mean of the complex numbers in the bin...
A[1:,t] = np.array([np.mean(D[b_start:b_start+self.bin_size,t])*self.bin_size for b_start in range(1, D.shape[0], self.bin_size)])
return A
def decode(self, A):
D = np.zeros((self.win_len//2+1, A.shape[1]), dtype=np.complex)
for t in range(A.shape[1]):
# Simple covering of the bin with mean of the bin
D[0, t] = A[0, t]
D[1:, t] = np.repeat(A[1:, t], self.bin_size)
# The center frequency is given the average
#D_r[0, t] = TS[0, t]
#D_r[1+bin_size//2:D_r.shape[0]:bin_size, t] = TS[1:, t]
# Random frequency in bin is given the average
return librosa.istft(randomise_phase(D))
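# Round-trip sketch (added; the audio path and sample rate are assumptions):
#   y, sr = librosa.load("example.wav", sr=22050)
#   enc = BinEncode(win_len=2048, n_bins=32)
#   A = enc.encode(y)       # (n_bins + 1) x n_frames complex descriptors
#   y_hat = enc.decode(A)   # resynthesis with randomised phase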
class PeaksEncode(Encode):
hop_length = 0
sound_length = 0
def __init__(self, win_len = 2048):
self.win_len = win_len
        self.hop_length = win_len // 4
        self.sr = 22050
    def encode(self, sound):
        # piptrack returns per-frame pitch candidates and their magnitudes
        H_pitch, H_pitch_mag = librosa.piptrack(sound, sr=self.sr, n_fft=self.win_len, hop_length=self.hop_length)
        features = compress(H_pitch, H_pitch_mag, n_peaks=16)
        return features
    def decode(self, A):
        return reconstruct(A, n_fft=self.win_len, sr=self.sr, hop_length=self.hop_length) | mit |
stshine/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
"""Attach some text labels.
"""
for rect in rects:
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
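        # Note (added): the label text is currently empty; the hook is kept so a per-bar
        # value such as '%.2f' % rect.get_height() can be dropped in later if wanted.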
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
"""Plot the raw totals for a configuration.
Keyword arguments:
config -- configuration name
plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
max_time, max_time_std, max_energy, max_energy_std -- single values
normalize -- True/False
"""
plot_data = sorted(plot_data)
keys = [p for (p, tt, tts, te, tes) in plot_data]
total_times = [tt for (p, tt, tts, te, tes) in plot_data]
total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
total_energies = [te for (p, tt, tts, te, tes) in plot_data]
total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
fig, ax1 = plt.subplots()
ind = np.arange(len(keys)) # the x locations for the groups
width = 0.35 # the width of the bars
# add some text for labels, title and axes ticks
ax1.set_title('Time/Energy Data for Configuration ' + config)
ax1.set_xticks(ind + width)
ax1.set_xticklabels(keys, rotation=45)
fig.set_tight_layout(True)
fig.set_size_inches(len(plot_data) / 1.5, 8)
ax2 = ax1.twinx()
# Normalize
if normalize:
total_times_std /= np.sum(total_times)
total_times /= np.sum(total_times)
total_energies_std /= np.sum(total_energies)
total_energies /= np.sum(total_energies)
ax1.set_ylabel('Time (Normalized)')
ax2.set_ylabel('Energy (Normalized)')
else:
        # convert time from ns to ms
total_times_std /= np.array(1000000.0)
total_times /= np.array(1000000.0)
total_energies_std /= np.array(1000000.0)
total_energies /= np.array(1000000.0)
ax1.set_ylabel('Time (ms)')
ax2.set_ylabel('Energy (Joules)')
rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
# set axis
x1, x2, y1, y2 = plt.axis()
if normalize:
ax1.set_ylim(ymin=0, ymax=1)
ax2.set_ylim(ymin=0, ymax=1)
else:
ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
autolabel(rects1, ax1)
autolabel(rects2, ax2)
# plt.show()
plt.savefig(path.join(output_dir, config + ".png"))
plt.close(fig)
def create_raw_total_data(config_data):
"""Get the raw data to plot for a configuration
Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]
Keyword arguments:
config_data -- (trial, trial_data)
"""
# We can't assume that the same number of heartbeats are always issued across trials
# key: profiler name; value: list of timing sums for each trial
profiler_total_times = {}
# key: profiler name; value: list of energy sums for each trial
profiler_total_energies = {}
for (t, td) in config_data:
for (profiler, ts, te, es, ee) in td:
# sum the total times and energies for each profiler in this trial
total_time = np.sum(te - ts)
total_energy = np.sum(ee - es)
# add to list to be averaged later
time_list = profiler_total_times.get(profiler, [])
time_list.append(total_time)
profiler_total_times[profiler] = time_list
energy_list = profiler_total_energies.get(profiler, [])
energy_list.append(total_energy)
profiler_total_energies[profiler] = energy_list
# Get mean and stddev for time and energy totals
return [(profiler,
np.mean(profiler_total_times[profiler]),
np.std(profiler_total_times[profiler]),
np.mean(profiler_total_energies[profiler]),
np.std(profiler_total_energies[profiler]))
for profiler in profiler_total_times.keys()]
def plot_all_raw_totals(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
os.makedirs(raw_total_norm_out_dir)
raw_total_out_dir = path.join(output_dir, 'raw_totals')
os.makedirs(raw_total_out_dir)
# (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
mean_times = []
mean_times_std = []
mean_energies = []
mean_energies_std = []
for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
for (p, tt, tts, te, tes) in profiler_tup:
mean_times.append(tt)
mean_times_std.append(tts)
mean_energies.append(te)
mean_energies_std.append(tes)
# get consistent max time/energy values across plots
max_t = np.max(mean_times)
max_t_std = np.max(mean_times_std)
max_e = np.max(mean_energies)
max_e_std = np.max(mean_energies_std)
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
for data in raw_totals_data]
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
"""Plot time series for a single trial.
Keyword arguments:
config -- the config name
trial -- the trial name
trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
max_end_time -- single value to use as max X axis value (for consistency across trials)
output_dir -- the output directory
"""
# TODO: Some profilers may have parallel tasks - need to identify this on plots
max_end_time = max_end_time / 1000000.0
trial_data = sorted(trial_data)
fig, ax1 = plt.subplots()
keys = [p for (p, ts, te, es, ee) in trial_data]
# add some text for labels, title and axes ticks
ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
ax1.set_xlabel('Time (ms)')
ax1.grid(True)
width = 8 # the width of the bars
ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
ax1.set_yticklabels(keys)
ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
ax1.set_xlim(xmin=0, xmax=max_end_time)
fig.set_tight_layout(True)
fig.set_size_inches(16, len(trial_data) / 3)
i = 10
for (p, ts, te, es, ee) in trial_data:
xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
ax1.broken_barh(xranges, (i - 0.5 * width, width))
i += 10
# place a vbar at the final time for this trial
last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
power_times = []
power_values = []
for (p, ts, te, es, ee) in trial_data:
if p == ENERGY_PROFILER_NAME:
power_times = te / 1000000.0
power_values = (ee - es) / ((te - ts) / 1000.0)
ax2 = ax1.twinx()
ax2.set_xlim(xmin=0, xmax=max_end_time)
ax2.set_ylim(ymin=0, ymax=max_power)
ax2.set_ylabel('Power (Watts)')
ax2.plot(power_times, power_values, color='r')
# plt.show()
plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
"""Compute power from start and end energy and times.
Return: power values
"""
return (ee - es) / ((te - ts) / 1000.0)
def plot_all_time_series(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
time_series_out_dir = path.join(output_dir, 'time_series')
os.makedirs(time_series_out_dir)
max_end_times = []
max_power_values = []
for (c, cd) in config_list:
for (t, td) in cd:
trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
max_end_times.append(np.nanmax(trial_max_end_times))
for (p, ts, te, es, ee) in td:
# We only care about the energy profiler (others aren't reliable for instant power anyway)
if p == ENERGY_PROFILER_NAME and len(te) > 0:
max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
max_time = np.nanmax(max_end_times)
max_power = np.nanmax(np.array(max_power_values)) * 1.2 # leave a little space at the top
for (config, config_data) in config_list:
[plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
"""Read a heartbeat log file.
Return: (profiler name, [start times], [end times], [start energies], [end energies], [instant powers])
Keyword arguments:
profiler_hb_log -- the file to read
"""
with warnings.catch_warnings():
try:
warnings.simplefilter("ignore")
time_start, time_end, energy_start, energy_end = \
np.loadtxt(profiler_hb_log,
dtype=np.dtype('uint64'),
skiprows=1,
usecols=(HB_LOG_IDX_START_TIME,
HB_LOG_IDX_END_TIME,
HB_LOG_IDX_START_ENERGY,
HB_LOG_IDX_END_ENERGY),
unpack=True,
ndmin=1)
except ValueError:
time_start, time_end, energy_start, energy_end = [], [], [], []
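    # The log file name is assumed to follow "<prefix>-<profiler>.log";
    # e.g. "heartbeat-cpu.log" would yield the profiler name "cpu" (illustrative).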
name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
return (name,
np.atleast_1d(time_start),
np.atleast_1d(time_end),
np.atleast_1d(energy_start),
np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
"""Process trial directory.
Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
Time and energy are normalized to 0 start values.
Keyword arguments:
trial_dir -- the directory for this trial
"""
log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
# Find the earliest timestamps and energy readings
min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
# Normalize timing/energy data to start values of 0
return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
"""Process a configuration directory.
Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]
Keyword arguments:
config_dir -- the directory for this configuration - contains subdirectories for each trial
"""
return [(trial_dir, process_trial_dir(path.join(config_dir, trial_dir))) for trial_dir in os.listdir(config_dir)]
def process_logs(log_dir):
"""Process log directory.
Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]
Keyword arguments:
log_dir -- the log directory to process - contains subdirectories for each configuration
"""
return [((config_dir.split('_')[1], process_config_dir(path.join(log_dir, config_dir))))
for config_dir in os.listdir(log_dir)]
def find_best_executions(log_dir):
"""Get the best time, energy, and power from the characterization summaries.
Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))
Keyword arguments:
    log_dir -- the log directory to process - contains subdirectories for each configuration
"""
DEFAULT = ('', '', 1000000000.0)
min_time = DEFAULT
min_energy = DEFAULT
min_power = DEFAULT
for config_dir in os.listdir(log_dir):
for trial_dir in os.listdir(path.join(log_dir, config_dir)):
with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
lines = s.readlines()
time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
if time < min_time[2]:
min_time = (config_dir, trial_dir, time)
if energy < min_energy[2]:
min_energy = (config_dir, trial_dir, energy)
                if power < min_power[2]:
min_power = (config_dir, trial_dir, power)
return (min_time, min_energy, min_power)
def main():
"""This script processes the log files from the "characterize.py" script and produces visualizations.
"""
# Default log directory
directory = 'heartbeat_logs'
# Default output directory
output_dir = 'plots'
# Default android
android = False
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
parser.add_argument("-d", "--directory",
default=directory,
help="Heartbeat log directory \"-d heartbeat_logs\"")
parser.add_argument("-o", "--output",
default=output_dir,
help="Specify the log output directory, for example \"-o plots\"")
parser.add_argument("--android",
action="store_true",
dest="android",
default=False,
help="Specify if processing results from Android")
args = parser.parse_args()
if args.directory:
directory = args.directory
if args.output:
output_dir = args.output
if args.android:
android = args.android
if not os.path.exists(directory):
print "Input directory does not exist: " + directory
sys.exit(1)
if os.path.exists(output_dir):
print "Output directory already exists: " + output_dir
sys.exit(1)
res = process_logs(directory)
if not android:
best = find_best_executions(directory)
print 'Best time:', best[0]
print 'Best energy:', best[1]
print 'Best power:', best[2]
os.makedirs(output_dir)
plot_all_raw_totals(res, output_dir)
plot_all_time_series(res, output_dir)
if __name__ == "__main__":
main()
| mpl-2.0 |
alexandrebarachant/mne-python | examples/connectivity/plot_mne_inverse_connectivity_spectrum.py | 13 | 3458 | """
==============================================================
Compute full spectrum source space connectivity between labels
==============================================================
The connectivity is computed between 4 labels across the spectrum
between 5 and 40 Hz.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.connectivity import spectral_connectivity
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Load data
inverse_operator = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
# Add a bad channel
raw.info['bads'] += ['MEG 2443']
# Pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
# Compute the inverse solution for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list.
snr = 1.0 # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
pick_ori="normal", return_generator=True)
# Read some labels
names = ['Aud-lh', 'Aud-rh', 'Vis-lh', 'Vis-rh']
labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % name)
for name in names]
# Average the source estimates within each label using sign-flips to reduce
# signal cancellations; here we also return a generator
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
return_generator=True)
fmin, fmax = 5., 40.
sfreq = raw.info['sfreq'] # the sampling frequency
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
label_ts, method='wpli2_debiased', mode='multitaper', sfreq=sfreq,
fmin=fmin, fmax=fmax, mt_adaptive=True, n_jobs=1)
n_rows, n_cols = con.shape[:2]
fig, axes = plt.subplots(n_rows, n_cols, sharex=True, sharey=True)
plt.suptitle('Between labels connectivity')
for i in range(n_rows):
for j in range(i + 1):
if i == j:
axes[i, j].set_axis_off()
continue
axes[i, j].plot(freqs, con[i, j, :])
axes[j, i].plot(freqs, con[i, j, :])
if j == 0:
axes[i, j].set_ylabel(names[i])
axes[0, i].set_title(names[i])
if i == (n_rows - 1):
axes[i, j].set_xlabel(names[j])
axes[i, j].set_xlim([fmin, fmax])
axes[j, i].set_xlim([fmin, fmax])
# Show band limits
for f in [8, 12, 18, 35]:
axes[i, j].axvline(f, color='k')
axes[j, i].axvline(f, color='k')
plt.show()
| bsd-3-clause |
IndraVikas/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 311 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. In contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large number of them. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
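# As a reminder (general definitions, not sklearn-specific): for a model with
# k fitted parameters and maximized likelihood L, AIC = 2*k - 2*ln(L) and
# BIC = k*ln(n) - 2*ln(L); the alpha minimizing the chosen criterion is kept.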
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
SeldonIO/seldon-server | python/seldon/sklearn_estimator.py | 3 | 2924 | from sklearn.feature_extraction import DictVectorizer
from seldon.pipeline.pandas_pipelines import BasePandasEstimator
from collections import OrderedDict
import io
from sklearn.utils import check_X_y
from sklearn.utils import check_array
from sklearn.base import BaseEstimator,ClassifierMixin
import pandas as pd
class SKLearnClassifier(BasePandasEstimator,BaseEstimator,ClassifierMixin):
"""
    Wrapper for a scikit-learn classifier with pandas DataFrame support.
clf : sklearn estimator
sklearn estimator to run
target : str
Target column
target_readable : str
More descriptive version of target variable
included : list str, optional
columns to include
excluded : list str, optional
columns to exclude
id_map : dict (int,str), optional
map of class ids to high level names
    sk_args : keyword arguments, optional
extra args for sklearn classifier
"""
def __init__(self, clf=None,target=None, target_readable=None,included=None,excluded=None,id_map={},vectorizer=None,**sk_args):
super(SKLearnClassifier, self).__init__(target,target_readable,included,excluded,id_map)
self.vectorizer = vectorizer
self.clf = clf
self.sk_args = sk_args
def fit(self,X,y=None):
"""
Fit an sklearn classifier to data
Parameters
----------
X : pandas dataframe or array-like
training samples
y : array like, required for array-like X and not used presently for pandas dataframe
class labels
Returns
-------
self: object
"""
if isinstance(X,pd.DataFrame):
df = X
(X,y,self.vectorizer) = self.convert_numpy(df)
else:
check_X_y(X,y)
self.clf.fit(X,y)
return self
def predict_proba(self,X):
"""
Returns class probability estimates for the given test data.
X : pandas dataframe or array-like
Test samples
Returns
-------
proba : array-like, shape = (n_samples, n_outputs)
Class probability estimates.
"""
if isinstance(X,pd.DataFrame):
df = X
(X,_,_) = self.convert_numpy(df)
else:
check_array(X)
return self.clf.predict_proba(X)
def predict(self,X):
"""
Returns class predictions
X : pandas dataframe or array-like
Test samples
Returns
-------
proba : array-like, shape = (n_samples, n_outputs)
Class predictions
"""
if isinstance(X,pd.DataFrame):
df = X
(X,_,_) = self.convert_numpy(df)
else:
check_array(X)
return self.clf.predict(X)
| apache-2.0 |
alexeyum/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 43 | 24671 | """
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from numpy.testing import run_module_suite
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
def test_mutual_info_classif():
X, y = make_classification(n_samples=100, n_features=5,
n_informative=1, n_redundant=1,
n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_classif, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
def test_mutual_info_regression():
X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
shuffle=False, random_state=0, noise=10)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_regression, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile',
param=20).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
shangwuhencc/scikit-learn | sklearn/utils/tests/test_testing.py | 107 | 4210 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
hammerlab/cohorts | cohorts/plot.py | 1 | 10847 | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from scipy.stats import mannwhitneyu, fisher_exact
import seaborn as sb
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve
from .model import bootstrap_auc
def vertical_percent(plot, percent=0.1):
"""
Using the size of the y axis, return a fraction of that size.
"""
plot_bottom, plot_top = plot.get_ylim()
return percent * (plot_top - plot_bottom)
def as_numeric(text):
try:
return float(text)
    except (ValueError, TypeError):
return None
def hide_ticks(plot, min_tick_value=None, max_tick_value=None):
"""Hide tick values that are outside of [min_tick_value, max_tick_value]"""
for tick, tick_value in zip(plot.get_yticklabels(), plot.get_yticks()):
tick_label = as_numeric(tick_value)
        if tick_label is not None:
if (min_tick_value is not None and tick_label < min_tick_value or
max_tick_value is not None and tick_label > max_tick_value):
tick.set_visible(False)
def hide_negative_y_ticks(plot):
hide_ticks(plot, min_tick_value=0)
def only_percentage_ticks(plot):
"""
Only show ticks from 0.0 to 1.0.
"""
hide_ticks(plot, min_tick_value=0, max_tick_value=1.0)
def add_significance_indicator(plot, col_a=0, col_b=1, significant=False):
"""
Add a p-value significance indicator.
"""
plot_bottom, plot_top = plot.get_ylim()
# Give the plot a little room for the significance indicator
line_height = vertical_percent(plot, 0.1)
# Add some extra spacing below the indicator
plot_top = plot_top + line_height
# Add some extra spacing above the indicator
plot.set_ylim(top=plot_top + line_height * 2)
color = "black"
line_top = plot_top + line_height
plot.plot([col_a, col_a, col_b, col_b], [plot_top, line_top, line_top, plot_top], lw=1.5, color=color)
indicator = "*" if significant else "ns"
plot.text((col_a + col_b) * 0.5, line_top, indicator, ha="center", va="bottom", color=color)
def stripboxplot(x, y, data, ax=None, significant=None, **kwargs):
"""
Overlay a stripplot on top of a boxplot.
"""
ax = sb.boxplot(
x=x,
y=y,
data=data,
ax=ax,
fliersize=0,
**kwargs
)
plot = sb.stripplot(
x=x,
y=y,
data=data,
ax=ax,
jitter=kwargs.pop("jitter", 0.05),
color=kwargs.pop("color", "0.3"),
**kwargs
)
if data[y].min() >= 0:
hide_negative_y_ticks(plot)
if significant is not None:
add_significance_indicator(plot=plot, significant=significant)
return plot
def sided_str_from_alternative(alternative, condition):
if alternative is None:
raise ValueError("Must pick an alternative")
if alternative == "two-sided":
return alternative
# alternative hypothesis: condition is 'less' or 'greater' than no-condition
op_str = ">" if alternative == "greater" else "<"
return "one-sided: %s %s not %s" % (condition, op_str, condition)
def get_condition_mask(df, condition, condition_value):
if condition_value:
condition_mask = df[condition] == condition_value
else:
# This is necessary in the event that condition has a non-bool dtype,
# such as object. This may happen if a function returns np.nan in
# addition to True/False (later filtered down to just True/False).
# ~condition_mask will behave incorrectly if dtype is not bool.
condition_mask = df[condition].astype("bool")
return condition_mask
class FishersExactResults(namedtuple("FishersExactResults", ["oddsratio", "p_value", "sided_str", "with_condition1_series", "without_condition1_series", "plot"])):
def __str__(self):
return "FishersExactResults(oddsratio=%s, p_value=%s, sided_str='%s')" % (
self.oddsratio, self.p_value, self.sided_str)
def __repr__(self):
return self.__str__()
def fishers_exact_plot(data, condition1, condition2, ax=None,
condition1_value=None,
alternative="two-sided", **kwargs):
"""
    Perform a Fisher's exact test to compare two binary columns
Parameters
----------
data: Pandas dataframe
Dataframe to retrieve information from
condition1: str
First binary column to compare (and used for test sidedness)
condition2: str
Second binary column to compare
ax : Axes, default None
Axes to plot on
condition1_value:
If `condition1` is not a binary column, split on =/!= to condition1_value
alternative:
Specify the sidedness of the test: "two-sided", "less"
or "greater"
"""
plot = sb.barplot(
x=condition1,
y=condition2,
ax=ax,
data=data,
**kwargs
)
plot.set_ylabel("Percent %s" % condition2)
condition1_mask = get_condition_mask(data, condition1, condition1_value)
count_table = pd.crosstab(data[condition1], data[condition2])
print(count_table)
oddsratio, p_value = fisher_exact(count_table, alternative=alternative)
add_significance_indicator(plot=plot, significant=p_value <= 0.05)
only_percentage_ticks(plot)
if alternative != "two-sided":
raise ValueError("We need to better understand the one-sided Fisher's Exact test")
sided_str = "two-sided"
print("Fisher's Exact Test: OR: {}, p-value={} ({})".format(oddsratio, p_value, sided_str))
return FishersExactResults(oddsratio=oddsratio,
p_value=p_value,
sided_str=sided_str,
with_condition1_series=data[condition1_mask][condition2],
without_condition1_series=data[~condition1_mask][condition2],
plot=plot)
class MannWhitneyResults(namedtuple("MannWhitneyResults", ["U", "p_value", "sided_str", "with_condition_series", "without_condition_series", "plot"])):
def __str__(self):
return "MannWhitneyResults(U=%s, p_value=%s, sided_str='%s')" % (
self.U, self.p_value, self.sided_str)
def __repr__(self):
return self.__str__()
def mann_whitney_plot(data,
condition,
distribution,
ax=None,
condition_value=None,
alternative="two-sided",
skip_plot=False,
**kwargs):
"""
Create a box plot comparing a condition and perform a
Mann Whitney test to compare the distribution in condition A v B
Parameters
----------
data: Pandas dataframe
Dataframe to retrieve information from
condition: str
Column to use as the splitting criteria
distribution: str
Column to use as the Y-axis or distribution in the test
ax : Axes, default None
Axes to plot on
condition_value:
If `condition` is not a binary column, split on =/!= to condition_value
alternative:
Specify the sidedness of the Mann-Whitney test: "two-sided", "less"
or "greater"
skip_plot:
Calculate the test statistic and p-value, but don't plot.
"""
condition_mask = get_condition_mask(data, condition, condition_value)
U, p_value = mannwhitneyu(
data[condition_mask][distribution],
data[~condition_mask][distribution],
alternative=alternative
)
plot = None
if not skip_plot:
plot = stripboxplot(
x=condition,
y=distribution,
data=data,
ax=ax,
significant=p_value <= 0.05,
**kwargs
)
sided_str = sided_str_from_alternative(alternative, condition)
print("Mann-Whitney test: U={}, p-value={} ({})".format(U, p_value, sided_str))
return MannWhitneyResults(U=U,
p_value=p_value,
sided_str=sided_str,
with_condition_series=data[condition_mask][distribution],
without_condition_series=data[~condition_mask][distribution],
plot=plot)
class CorrelationResults(namedtuple("CorrelationResults", ["coeff", "p_value", "stat_func", "series_x", "series_y", "plot"])):
def __str__(self):
return "CorrelationResults(coeff=%s, p_value=%s, stat_func=%s)" % (
self.coeff, self.p_value, self.stat_func.__name__)
def __repr__(self):
return self.__str__()
def roc_curve_plot(data, value_column, outcome_column, bootstrap_samples=100, ax=None):
"""Create a ROC curve and compute the bootstrap AUC for the given variable and outcome
Parameters
----------
data : Pandas dataframe
Dataframe to retrieve information from
value_column : str
Column to retrieve the values from
outcome_column : str
Column to use as the outcome variable
bootstrap_samples : int, optional
Number of bootstrap samples to use to compute the AUC
ax : Axes, default None
Axes to plot on
Returns
-------
(mean_bootstrap_auc, roc_plot) : (float, matplotlib plot)
Mean AUC for the given number of bootstrap samples and the plot
"""
scores = bootstrap_auc(df=data,
col=value_column,
pred_col=outcome_column,
n_bootstrap=bootstrap_samples)
mean_bootstrap_auc = scores.mean()
print("{}, Bootstrap (samples = {}) AUC:{}, std={}".format(
value_column, bootstrap_samples, mean_bootstrap_auc, scores.std()))
outcome = data[outcome_column].astype(int)
values = data[value_column]
fpr, tpr, thresholds = roc_curve(outcome, values)
if ax is None:
ax = plt.gca()
roc_plot = ax.plot(fpr, tpr, lw=1, label=value_column)
ax.set_xlim([-0.05, 1.05])
ax.set_ylim([-0.05, 1.05])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.legend(loc=2, borderaxespad=0.)
ax.set_title('{} ROC Curve (n={})'.format(value_column, len(values)))
return (mean_bootstrap_auc, roc_plot)
| apache-2.0 |
takluyver/xray | xray/indexing.py | 1 | 8564 | import numpy as np
import utils
def expanded_indexer(key, ndim):
"""Given a key for indexing an ndarray, return an equivalent key which is a
tuple with length equal to the number of dimensions.
The expansion is done by replacing all `Ellipsis` items with the right
number of full slices and then padding the key with full slices so that it
reaches the appropriate dimensionality.
"""
if not isinstance(key, tuple):
# numpy treats non-tuple keys equivalent to tuples of length 1
key = (key,)
new_key = []
# handling Ellipsis right is a little tricky, see:
# http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
found_ellipsis = False
for k in key:
if k is Ellipsis:
if not found_ellipsis:
new_key.extend((ndim + 1 - len(key)) * [slice(None)])
found_ellipsis = True
else:
new_key.append(slice(None))
else:
new_key.append(k)
if len(new_key) > ndim:
raise IndexError('too many indices')
new_key.extend((ndim - len(new_key)) * [slice(None)])
return tuple(new_key)
def canonicalize_indexer(key, ndim):
"""Given an indexer for orthogonal array indexing, return an indexer that
is a tuple composed entirely of slices, integer ndarrays and native python
ints.
"""
def canonicalize(indexer):
if not isinstance(indexer, slice):
indexer = np.asarray(indexer)
if indexer.ndim == 0:
indexer = int(np.asscalar(indexer))
if isinstance(indexer, np.ndarray):
if indexer.ndim != 1:
raise ValueError('orthogonal array indexing only supports '
'1d arrays')
if indexer.dtype.kind == 'b':
indexer, = np.nonzero(indexer)
elif indexer.dtype.kind != 'i':
raise ValueError('invalid subkey %r for integer based '
'array indexing; all subkeys must be '
'slices, integers or sequences of '
'integers or Booleans' % indexer)
return indexer
return tuple(canonicalize(k) for k in expanded_indexer(key, ndim))
def orthogonal_indexer(key, shape):
"""Given a key for orthogonal array indexing, returns an equivalent key
suitable for indexing a numpy.ndarray with fancy indexing.
"""
def expand_key(k, length):
if isinstance(k, slice):
return np.arange(k.start or 0, k.stop or length, k.step or 1)
else:
return k
# replace Ellipsis objects with slices
key = list(canonicalize_indexer(key, len(shape)))
# replace 1d arrays and slices with broadcast compatible arrays
# note: we treat integers separately (instead of turning them into 1d
# arrays) because integers (and only integers) collapse axes when used with
# __getitem__
non_int_keys = [n for n, k in enumerate(key) if not isinstance(k, int)]
def full_slices_unselected(n_list):
def all_full_slices(key_index):
return all(isinstance(key[n], slice) and key[n] == slice(None)
for n in key_index)
if not n_list:
return n_list
elif all_full_slices(range(n_list[0] + 1)):
return full_slices_unselected(n_list[1:])
elif all_full_slices(range(n_list[-1], len(key))):
return full_slices_unselected(n_list[:-1])
else:
return n_list
# However, testing suggests it is OK to keep contiguous sequences of full
# slices at the start or the end of the key. Keeping slices around (when
# possible) instead of converting slices to arrays significantly speeds up
# indexing.
# (Honestly, I don't understand when it's not OK to keep slices even in
    # between integer indices if an array is somewhere in the key, but such are
# the admittedly mind-boggling ways of numpy's advanced indexing.)
array_keys = full_slices_unselected(non_int_keys)
array_indexers = np.ix_(*(expand_key(key[n], shape[n])
for n in array_keys))
for i, n in enumerate(array_keys):
key[n] = array_indexers[i]
return tuple(key)
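# Hedged example of the intended behaviour (array shape assumed for
# illustration): for an array of shape (3, 4),
#   orthogonal_indexer(([0, 2], [1, 3]), (3, 4))
# broadcasts the two 1d keys with np.ix_, so arr[result] selects the 2x2
# block formed by rows {0, 2} and columns {1, 3}, rather than the two single
# points numpy's fancy indexing would return for arr[[0, 2], [1, 3]].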
def convert_label_indexer(index, label, index_name=''):
"""Given a pandas.Index (or xray.Coordinate) and labels (e.g., from
__getitem__) for one dimension, return an indexer suitable for indexing an
ndarray along that dimension
"""
if isinstance(label, slice):
indexer = index.slice_indexer(label.start, label.stop, label.step)
else:
label = np.asarray(label)
if label.ndim == 0:
indexer = index.get_loc(np.asscalar(label))
else:
indexer = index.get_indexer(label)
if np.any(indexer < 0):
raise ValueError('not all values found in index %r'
% index_name)
return indexer
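# Rough usage sketch (the index and labels are made up, and pandas is assumed
# to be imported as pd):
#   idx = pd.Index([10, 20, 30])
#   convert_label_indexer(idx, 20)            -> 1 (integer position)
#   convert_label_indexer(idx, [10, 30])      -> array([0, 2])
#   convert_label_indexer(idx, slice(10, 20)) -> slice(0, 2, None)
# Labels missing from the index surface as a KeyError from get_loc (scalars)
# or the ValueError raised above (arrays).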
def remap_label_indexers(data_obj, indexers):
"""Given an xray data object and label based indexers, return a mapping
of equivalent location based indexers.
"""
return {dim: convert_label_indexer(data_obj.coordinates[dim], label, dim)
for dim, label in indexers.iteritems()}
def _expand_slice(slice_, size):
return np.arange(*slice_.indices(size))
def slice_slice(old_slice, applied_slice, size):
"""Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
"""
step = (old_slice.step or 1) * (applied_slice.step or 1)
# For now, use the hack of turning old_slice into an ndarray to reconstruct
# the slice start and stop. This is not entirely ideal, but it is still
# definitely better than leaving the indexer as an array.
items = _expand_slice(old_slice, size)[applied_slice]
if len(items) > 0:
start = items[0]
stop = items[-1] + step
if stop < 0:
stop = None
else:
start = 0
stop = 0
return slice(start, stop, step)
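# Worked sketch, assuming a dimension of size 10:
#   slice_slice(slice(2, 8, 2), slice(1, None), 10)
# expands slice(2, 8, 2) to [2, 4, 6], applies [1:] to get [4, 6], and
# returns slice(4, 8, 2), which selects the same elements in a single step.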
def _index_indexer_1d(old_indexer, applied_indexer, size):
assert isinstance(applied_indexer, (int, slice, np.ndarray))
if isinstance(applied_indexer, slice) and applied_indexer == slice(None):
# shortcut for the usual case
return old_indexer
if isinstance(old_indexer, slice):
if isinstance(applied_indexer, slice):
indexer = slice_slice(old_indexer, applied_indexer, size)
else:
indexer = _expand_slice(old_indexer, size)[applied_indexer]
else:
indexer = old_indexer[applied_indexer]
return indexer
class LazilyIndexedArray(utils.NDArrayMixin):
"""Wrap an array that handles orthogonal indexing to make indexing lazy
"""
def __init__(self, array, key=None):
"""
Parameters
----------
array : array_like
Array like object to index.
key : tuple, optional
Array indexer. If provided, it is assumed to already be in
canonical expanded form.
"""
if key is None:
key = (slice(None),) * array.ndim
self.array = array
self.key = key
def _updated_key(self, new_key):
new_key = iter(canonicalize_indexer(new_key, self.ndim))
key = []
for size, k in zip(self.array.shape, self.key):
if isinstance(k, int):
key.append(k)
else:
key.append(_index_indexer_1d(k, new_key.next(), size))
return tuple(key)
@property
def shape(self):
shape = []
for size, k in zip(self.array.shape, self.key):
if isinstance(k, slice):
shape.append(len(xrange(*k.indices(size))))
elif isinstance(k, np.ndarray):
shape.append(k.size)
return tuple(shape)
@property
def values(self):
return self.array[self.key]
def __array__(self, dtype=None):
        return np.asarray(self.values, dtype=dtype)
def __getitem__(self, key):
return type(self)(self.array, self._updated_key(key))
def __setitem__(self, key, value):
key = self._updated_key(key)
self.array[key] = value
def __repr__(self):
return ('%s(array=%r, key=%r)' %
(type(self).__name__, self.array, self.key))
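# Minimal usage sketch (array contents assumed for illustration): wrapping a
# plain ndarray defers the actual indexing until .values is read, and chained
# __getitem__ calls keep folding keys into self.key instead of copying data:
#   lazy = LazilyIndexedArray(np.arange(12).reshape(3, 4))
#   sub = lazy[0:2][1]    # still lazy; the key has folded to (1, slice(None))
#   sub.values            # only here is the wrapped array actually indexed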
| apache-2.0 |
466152112/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed-form formula to compute
the asymptotically optimal shrinkage parameter (minimizing an MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
mnschmit/piano-note-recognition | midi_comparaison.py | 1 | 3581 | #!/usr/bin/python
usage='''
Usage: midi_comparaison.py filename.wav filename.mid [pitch_min pitch_max filtering]
Mandatory arguments: two files to compare
Optional arguments: pitch_min (smallest pitch considered), pitch_max (largest pitch considered), filtering (true or false)
'''
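# Example invocation (the file names are hypothetical, not shipped with the repo):
# python midi_comparaison.py recording.wav reference.mid C2 C6 true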
import sys
from librosa import load, stft, logamplitude, note_to_midi, midi_to_hz
import numpy as np
if len(sys.argv) <= 2:
print usage
sys.exit(-1)
filename = sys.argv[1]
midi_filename = sys.argv[2]
pitch_min = note_to_midi('C1')
if len(sys.argv) > 3:
pitch_min = note_to_midi(sys.argv[3])
pitch_max = note_to_midi('C7')
if len(sys.argv) > 4:
pitch_max = note_to_midi(sys.argv[4])
pitches = range(pitch_min, pitch_max + 1)
#pitches = note_to_midi(['C4', 'D4', 'E4', 'F4', 'G4', 'A4', 'B4', 'C5'])
filtering = True
if len(sys.argv) > 5:
if sys.argv[5] == "false":
filtering = False
    elif sys.argv[5] == "true":
filtering = True
else:
print "Error reading filtering argument. Assuming true."
### main program ###
x, sr = load(filename)
# compute normal STFT
n_components = len(pitches)
n_fft = 2048
hop_length = n_fft * 3 / 4# big hop_length
X = stft(x, n_fft=n_fft, hop_length=hop_length)
### midi visualization ###
from Midi import midi_matrix
midi_mat = midi_matrix(midi_filename, min_pitch=pitch_min, max_pitch=pitch_max)
### NMF ###
V = np.abs(X)
## custom initialisation ##
W_zero = np.zeros((V.shape[0], n_components)).transpose()
threshold = 0.1
index = 0
#pitch = pitch_min
for comp in W_zero:
h = 1
fund_freq = midi_to_hz(pitches[index])
while int(fund_freq*h*n_fft/sr) < W_zero.shape[1]:
for freq in range(int(fund_freq*h*n_fft/sr * (2**(-threshold))), int(fund_freq*h*n_fft/sr * (2**threshold))):
if freq < W_zero.shape[1]:
comp[freq] = 1.0 / h
h += 1
index += 1
W_zero = W_zero.transpose()
H_zero = np.ones((n_components, V.shape[1]))
from NMF import factorize
comps, acts = factorize(V, W_zero, H_zero)
# filtering activations
if filtering:
filter_threshold = np.max(acts) / 5
for i in range(1, acts.shape[0]):
for j in range(0, acts.shape[1]):
if acts[i-1][j] > filter_threshold and acts[i-1][j] > acts[i][j]:
acts[i-1][j] += acts[i][j]
acts[i][j] = 0
acts[acts < filter_threshold] = 0
# visualisation matters
import matplotlib.pyplot as plt
from librosa.display import specshow
import matplotlib.gridspec as gridspec
plt.close('all')
#plt.subplot2grid((4, 2), (0,0))
#specshow(W_zero, sr=sr, hop_length=n_fft/4, n_yticks=10, y_axis='linear')
#plt.title('Initialised components')
#plt.subplot2grid((4, 2), (0,1))
#specshow(H_zero, sr=sr, x_axis='time')
#plt.title('Randomly initialised activations')
#plt.subplot2grid((3, 2), (0,0), colspan=2)
#specshow(V, sr=sr, x_axis='time', y_axis='linear')
#plt.colorbar()
#plt.title('Input power spectrogram')
plt.subplot2grid((1, 2), (0,0))
specshow(midi_mat, n_yticks=25, y_axis='cqt_note', fmin=midi_to_hz(pitch_min))
plt.xlabel('Ticks')
plt.title('Midi reference')
plt.subplot2grid((1, 2), (0,1))
specshow(acts, sr=sr, hop_length=hop_length, n_yticks=25, y_axis='cqt_note', x_axis='time', fmin=midi_to_hz(pitch_min))
plt.colorbar()
plt.ylabel('Components')
plt.title('Determined Activations')
#plt.subplot2grid((3, 2), (2,0), colspan=2)
#V_approx = comps.dot(acts)
#specshow(V_approx, sr=sr, x_axis='time', y_axis='linear')
#plt.colorbar()
#plt.title('Reconstructed spectrum')
plt.tight_layout()
plt.show()
| gpl-2.0 |
annahs/atmos_research | NC_MAC_vs_alt_single_coating_thickness.py | 1 | 16412 | from pymiecoated import Mie
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
import mysql.connector
import math
import matplotlib.pyplot as plt
import matplotlib.colors
import calendar
from scipy.optimize import curve_fit
assumed_coating_th = [41,43,44,40,38,40,44,41,38,38] #nm sc1-7
assumed_coating_th = [43,57,50,57,51,47,46,40,30,17] #nm sc10
wavelength = 550 #nm
rBC_RI = complex(2.26,1.26)
savefig = False
show_distr_plots = False
#alt parameters
min_alt = 0
max_alt = 5000
alt_incr = 500
#distr parameters
bin_value_min = 80
bin_value_max = 220
bin_incr = 10
bin_number_lim = (bin_value_max-bin_value_min)/bin_incr
#constants
R = 8.3144621 # in m3*Pa/(K*mol)
flight_times = {
'science 1' : [datetime(2015,4,5,9,0),datetime(2015,4,5,14,0),15.6500, 78.2200] ,
##'ferry 1' : [datetime(2015,4,6,9,0),datetime(2015,4,6,11,0),15.6500, 78.2200] ,
##'ferry 2' : [datetime(2015,4,6,15,0),datetime(2015,4,6,18,0),-16.6667, 81.6000] ,
#'science 2' : [datetime(2015,4,7,16,0),datetime(2015,4,7,21,0),-62.338, 82.5014] ,
#'science 3' : [datetime(2015,4,8,13,0),datetime(2015,4,8,17,0),-62.338, 82.5014] ,
#'science 4' : [datetime(2015,4,8,17,30),datetime(2015,4,8,22,0),-70.338, 82.5014] ,
#'science 5' : [datetime(2015,4,9,13,30),datetime(2015,4,9,18,0),-62.338, 82.0] ,
##'ferry 3' : [datetime(2015,4,10,14,0),datetime(2015,4,10,17,0),-75.338, 81] ,
#'science 6' : [datetime(2015,4,11,15,0),datetime(2015,4,11,22,0),-90.9408, 80.5] ,
#'science 7' : [datetime(2015,4,13,15,0),datetime(2015,4,13,21,0),-95, 80.1] ,
#'science 8' : [datetime(2015,4,20,15,0),datetime(2015,4,20,20,0),-133.7306, 67.1],
#'science 9' : [datetime(2015,4,20,21,0),datetime(2015,4,21,2,0),-133.7306, 69.3617] ,
#'science 10' : [datetime(2015,4,21,16,0),datetime(2015,4,21,22,0),-131, 69.55],
}
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
def lognorm(x_vals, A, w, xc):
return A/(np.sqrt(2*math.pi)*w*x_vals)*np.exp(-(np.log(x_vals/xc))**2/(2*w**2))
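# lognorm above is a log-normal shape scaled by an area parameter: A is the
# total area under the curve, xc the geometric mean diameter (peak position on
# a log axis) and w the width of ln(x). For instance, lognorm(xc, A, w, xc)
# evaluates to A / (sqrt(2*pi) * w * xc), the value of the fit at x = xc.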
def MieCalc(wavelength,core_dia,coat_th):
mie = Mie()
wl = wavelength
core_rad = core_dia/2 #nm
shell_thickness = coat_th
size_par = 2*math.pi*core_rad*1/wl
#Refractive indices PSL 1.59-0.0i rBC 2.26- 1.26i shell 1.5-0.0i
core_RI = rBC_RI
shell_rad = core_rad + shell_thickness
shell_RI = complex(1.5,0.0)
mie.x = 2*math.pi*core_rad/wl
mie.m = core_RI
mie.y = 2*math.pi*shell_rad/wl
mie.m2 = shell_RI
abs = mie.qabs()
abs_xs_nm2 = abs*math.pi*shell_rad**2 #in nm^2
abs_xs = abs_xs_nm2*1e-14 #in cm^2
sca = mie.qsca()
sca_xs_nm2 = sca*math.pi*shell_rad**2 #in nm^2
sca_xs = sca_xs_nm2*1e-14 #in cm^2
ext_xs = sca_xs+abs_xs
return [abs_xs,sca_xs,ext_xs]
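# Quick sanity-check sketch for MieCalc (the numbers are illustrative only,
# not values used in the analysis below):
#   abs_xs, sca_xs, ext_xs = MieCalc(550, 150, 40)
# gives the absorption, scattering and extinction cross sections in cm^2 for
# a 150 nm rBC core with a 40 nm non-absorbing coating at 550 nm; by
# construction ext_xs equals abs_xs + sca_xs.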
#bin and step size for extrapolating to the full distr
fit_bins = []
for x in range (30,1000,1):
fit_bins.append(x)
plot_data={}
for flight in flight_times:
print flight
lower_alt = min_alt
start_time = flight_times[flight][0]
end_time = flight_times[flight][1]
UNIX_start_time = calendar.timegm(start_time.utctimetuple())
UNIX_end_time = calendar.timegm(end_time.utctimetuple())
alt = 0
while (lower_alt + alt_incr) <= max_alt:
#make data binning dicts for the interval
mass_binned_data = {}
number_binned_data = {}
i = bin_value_min
while i < bin_value_max:
mass_binned_data[i] = []
number_binned_data[i] = []
i+=bin_incr
#get mass data
cursor.execute(('SELECT bnm.70t80,bnm.80t90,bnm.90t100,bnm.100t110,bnm.110t120,bnm.120t130,bnm.130t140,bnm.140t150,bnm.150t160,bnm.160t170,bnm.170t180,bnm.180t190,bnm.190t200,bnm.200t210,bnm.210t220,bnm.sampled_vol,bnm.total_mass, ftd.temperature_C,ftd.BP_Pa from polar6_binned_mass_and_sampled_volume_alertcalib bnm join polar6_flight_track_details ftd ON bnm.flight_track_data_id = ftd.id WHERE ftd.alt >=%s and ftd.alt < %s and bnm.UNIX_UTC_ts >= %s and bnm.UNIX_UTC_ts < %s'),(lower_alt,(lower_alt + alt_incr),UNIX_start_time,UNIX_end_time))
mass_data = cursor.fetchall()
for row in mass_data:
volume_sampled = row[15]
total_mass = row[16]
temperature = row[17] + 273.15 #convert to Kelvin
pressure = row[18]
correction_factor_for_STP = (101325/pressure)*(temperature/273)
total_mass_conc_value = total_mass*correction_factor_for_STP/volume_sampled
#append STP corrected mass conc to dict of binned data
i=1
j=bin_value_min
while i <= bin_number_lim:
mass_binned_data[j].append(row[i]*correction_factor_for_STP/volume_sampled)
i+=1
j+=10
#get number data
cursor.execute(('SELECT bnn.70t80,bnn.80t90,bnn.90t100,bnn.100t110,bnn.110t120,bnn.120t130,bnn.130t140,bnn.140t150,bnn.150t160,bnn.160t170,bnn.170t180,bnn.180t190,bnn.190t200,bnn.200t210,bnn.210t220,bnn.sampled_vol,bnn.total_number, ftd.temperature_C,ftd.BP_Pa from polar6_binned_number_and_sampled_volume_alertcalib bnn join polar6_flight_track_details ftd ON bnn.flight_track_data_id = ftd.id WHERE ftd.alt >=%s and ftd.alt < %s and bnn.UNIX_UTC_ts >= %s and bnn.UNIX_UTC_ts < %s'),(lower_alt,(lower_alt + alt_incr),UNIX_start_time,UNIX_end_time))
number_data = cursor.fetchall()
for row in number_data:
volume_sampled = row[15]
total_number = row[16]
temperature = row[17] + 273.15 #convert to Kelvin
pressure = row[18]
correction_factor_for_STP = (101325/pressure)*(temperature/273)
#append STP corrected number conc to dict of binned data
i=1
j=bin_value_min
while i <= bin_number_lim:
number_binned_data[j].append(row[i]*correction_factor_for_STP/volume_sampled)
i+=1
j+=10
#make lists from binned data and sort
binned_list = []
number_binned_list = []
for key in mass_binned_data:
abs_xsec = MieCalc(wavelength,(key+bin_incr/2),assumed_coating_th[alt])[0]
sca_xsec = MieCalc(wavelength,(key+bin_incr/2),assumed_coating_th[alt])[1]
abs_xsec_bare = MieCalc(wavelength,(key+bin_incr/2),0)[0]
sca_xsec_bare = MieCalc(wavelength,(key+bin_incr/2),0)[1]
binned_list.append([(key+bin_incr/2), np.mean(mass_binned_data[key]), np.mean(number_binned_data[key]), abs_xsec,sca_xsec, abs_xsec_bare,sca_xsec_bare])
binned_list.sort()
#optical constants for the measured mass range
optical_data_meas = []
for row in binned_list:
row[1] = row[1]/(math.log((row[0]+bin_incr/2))-math.log(row[0]-bin_incr/2)) #normalize mass
row[2] = row[2]/(math.log((row[0]+bin_incr/2))-math.log(row[0]-bin_incr/2)) #normalize number
bin_midpoint = row[0]
bin_mass = row[1] #in fg/cm3
bin_number = row[2] #in #/cm3
bin_abs_xsec = row[3] #in cm2
bin_sca_xsec = row[4] #in cm2
bin_abs_xsec_bare = row[5] #in cm2
bin_sca_xsec_bare = row[6] #in cm2
vol_abs_coeff = bin_number*bin_abs_xsec #in cm-1
vol_sca_coeff = bin_number*bin_sca_xsec #in cm-1
vol_abs_coeff_bare = bin_number*bin_abs_xsec_bare #in cm-1
vol_sca_coeff_bare = bin_number*bin_sca_xsec_bare #in cm-1
mass_abs_coeff_int = (vol_abs_coeff)/bin_mass #in cm2/fg
mass_abs_coeff = mass_abs_coeff_int*(10**11) #in m2/g
optical_data_meas.append([bin_midpoint,bin_mass,bin_number,vol_abs_coeff,vol_sca_coeff,vol_abs_coeff_bare,vol_sca_coeff_bare])
bin_midpoints = np.array([row[0] for row in optical_data_meas])
mass_concs = [row[1] for row in optical_data_meas]
mass_concs_sum = np.sum([row[1] for row in optical_data_meas])
number_concs = np.array([row[2] for row in optical_data_meas])
vol_abs_coeff_sum = np.sum([row[3] for row in optical_data_meas])
vol_sca_coeff_sum = np.sum([row[4] for row in optical_data_meas])
vol_abs_coeff_sum_bare = np.sum([row[5] for row in optical_data_meas])
vol_sca_coeff_sum_bare = np.sum([row[6] for row in optical_data_meas])
MAC_meas = vol_abs_coeff_sum*(10**11)/mass_concs_sum
SSA_meas = vol_sca_coeff_sum/(vol_sca_coeff_sum+vol_abs_coeff_sum)
MAC_meas_bare = vol_abs_coeff_sum_bare*(10**11)/mass_concs_sum
SSA_meas_bare = vol_sca_coeff_sum_bare/(vol_sca_coeff_sum_bare+vol_abs_coeff_sum_bare)
abs_enhancement_meas = vol_abs_coeff_sum/vol_abs_coeff_sum_bare
#fit mass distr with lognormal
#get Dg and sigma and write to dict
try:
popt, pcov = curve_fit(lognorm, bin_midpoints, mass_concs)
fit_binned_mass_concs = []
for bin in fit_bins:
fit_val = lognorm(bin, popt[0], popt[1], popt[2])
fit_binned_mass_concs.append([bin,fit_val])
except:
print 'fit failure'
#fit number distr with lognormal
try:
popt, pcov = curve_fit(lognorm, bin_midpoints, number_concs)
fit_binned_number_concs = []
fit_binned_mass_concs_c = []
for bin in fit_bins:
fit_val = lognorm(bin, popt[0], popt[1], popt[2])
fit_binned_number_concs.append([bin,fit_val])
except:
print 'fit failure'
#optical constants for the extrapolated (from fit) full mass range
i=0
optical_data = []
for row in fit_binned_number_concs:
bin_midpoint = row[0]
bin_mass = fit_binned_mass_concs[i][1] #in fg/cm3
bin_number = row[1] #in #/cm3
abs_xsec = MieCalc(wavelength,bin_midpoint,assumed_coating_th[alt])[0]
sca_xsec = MieCalc(wavelength,bin_midpoint,assumed_coating_th[alt])[1]
abs_xsec_bare = MieCalc(wavelength,bin_midpoint,0)[0]
sca_xsec_bare = MieCalc(wavelength,bin_midpoint,0)[1]
vol_abs_coeff = bin_number*abs_xsec #in cm-1
vol_sca_coeff = bin_number*sca_xsec #in cm-1
vol_abs_coeff_bare = bin_number*abs_xsec_bare #in cm-1
vol_sca_coeff_bare = bin_number*sca_xsec_bare #in cm-1
mass_abs_coeff_int = (vol_abs_coeff)/bin_mass #in cm2/fg
mass_abs_coeff = mass_abs_coeff_int*(10**11) #in m2/g
optical_data.append([bin_mass,vol_abs_coeff,vol_sca_coeff,vol_abs_coeff_bare,vol_sca_coeff_bare,bin_midpoint])
i+=1
mass_concs_sum_calc = np.sum([row[0] for row in optical_data])
vol_abs_coeff_sum_calc = np.sum([row[1] for row in optical_data])
vol_sca_coeff_sum_calc = np.sum([row[2] for row in optical_data])
vol_abs_coeff_sum_calc_bare = np.sum([row[3] for row in optical_data])
vol_sca_coeff_sum_calc_bare = np.sum([row[4] for row in optical_data])
MAC_calc = vol_abs_coeff_sum_calc*(10**11)/mass_concs_sum_calc
SSA_calc = vol_sca_coeff_sum_calc/(vol_sca_coeff_sum_calc+vol_abs_coeff_sum_calc)
MAC_calc_bare = vol_abs_coeff_sum_calc_bare*(10**11)/mass_concs_sum_calc
SSA_calc_bare = vol_sca_coeff_sum_calc_bare/(vol_sca_coeff_sum_calc_bare+vol_abs_coeff_sum_calc_bare)
abs_enhancement_calc = vol_abs_coeff_sum_calc/vol_abs_coeff_sum_calc_bare
#add overall data to dict
mean_alt = lower_alt + alt_incr/2
print mean_alt
if mean_alt in plot_data:
plot_data[mean_alt].append([MAC_calc,SSA_calc,MAC_calc_bare,SSA_calc_bare,MAC_meas,SSA_meas,MAC_meas_bare,SSA_meas_bare,abs_enhancement_meas,abs_enhancement_calc])
else:
plot_data[mean_alt] = [[MAC_calc,SSA_calc,MAC_calc_bare,SSA_calc_bare,MAC_meas,SSA_meas,MAC_meas_bare,SSA_meas_bare,abs_enhancement_meas,abs_enhancement_calc]]
####plotting distrs if desired
fit_binned_mass_conc_vals = [row[1] for row in fit_binned_mass_concs]
fit_binned_number_conc_vals = [row[1] for row in fit_binned_number_concs]
if show_distr_plots == True:
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.semilogx(bin_midpoints,number_concs, color = 'g',marker='o')
ax1.semilogx(bin_midpoints,mass_concs, color = 'b',marker='o')
ax1.semilogx(fit_bins,fit_binned_mass_conc_vals, color = 'b',marker=None)
ax1.semilogx(fit_bins,fit_binned_number_conc_vals, color = 'g',marker=None)
plt.ylabel('dM/dlog(VED)')
ax1.set_xlabel('VED (nm)')
plt.show()
lower_alt += alt_incr
alt += 1
cnx.close()
print 'next step . . .'
##
plot_list = []
for mean_alt in plot_data:
mean_MAC_calc = np.mean([row[0] for row in plot_data[mean_alt]])
min_MAC_calc = mean_MAC_calc - np.min([row[0] for row in plot_data[mean_alt]])
max_MAC_calc = np.max([row[0] for row in plot_data[mean_alt]]) - mean_MAC_calc
mean_SSA_calc = np.mean([row[1] for row in plot_data[mean_alt]])
min_SSA_calc = mean_SSA_calc - np.min([row[1] for row in plot_data[mean_alt]])
max_SSA_calc = np.max([row[1] for row in plot_data[mean_alt]]) - mean_SSA_calc
mean_MAC_calc_bare = np.mean([row[2] for row in plot_data[mean_alt]])
min_MAC_calc_bare = mean_MAC_calc_bare - np.min([row[2] for row in plot_data[mean_alt]])
max_MAC_calc_bare = np.max([row[2] for row in plot_data[mean_alt]]) - mean_MAC_calc_bare
mean_SSA_calc_bare = np.mean([row[3] for row in plot_data[mean_alt]])
min_SSA_calc_bare = mean_SSA_calc_bare - np.min([row[3] for row in plot_data[mean_alt]])
max_SSA_calc_bare = np.max([row[3] for row in plot_data[mean_alt]]) - mean_SSA_calc_bare
mean_MAC_meas = np.mean([row[4] for row in plot_data[mean_alt]])
min_MAC_meas = mean_MAC_meas - np.min([row[4] for row in plot_data[mean_alt]])
max_MAC_meas = np.max([row[4] for row in plot_data[mean_alt]]) - mean_MAC_meas
mean_SSA_meas = np.mean([row[5] for row in plot_data[mean_alt]])
min_SSA_meas = mean_SSA_meas - np.min([row[5] for row in plot_data[mean_alt]])
max_SSA_meas = np.max([row[5] for row in plot_data[mean_alt]]) - mean_SSA_meas
mean_MAC_meas_bare = np.mean([row[6] for row in plot_data[mean_alt]])
min_MAC_meas_bare = mean_MAC_meas_bare - np.min([row[6] for row in plot_data[mean_alt]])
max_MAC_meas_bare = np.max([row[6] for row in plot_data[mean_alt]]) - mean_MAC_meas_bare
mean_SSA_meas_bare = np.mean([row[7] for row in plot_data[mean_alt]])
min_SSA_meas_bare = mean_SSA_meas_bare - np.min([row[7] for row in plot_data[mean_alt]])
max_SSA_meas_bare = np.max([row[7] for row in plot_data[mean_alt]]) - mean_SSA_meas_bare
mean_abse_meas = np.mean([row[8] for row in plot_data[mean_alt]])
mean_abse_calc = np.mean([row[9] for row in plot_data[mean_alt]])
plot_list.append([mean_alt,mean_MAC_calc,mean_SSA_calc,mean_MAC_calc_bare,mean_SSA_calc_bare,mean_MAC_meas,mean_SSA_meas,mean_MAC_meas_bare,mean_SSA_meas_bare,mean_abse_calc,mean_abse_meas])
plot_list.sort()
altitudes = [row[0] for row in plot_list]
MAC_calc_mean = [row[1] for row in plot_list]
SSA_calc_mean = [row[2] for row in plot_list]
MAC_calc_mean_bare = [row[3] for row in plot_list]
SSA_calc_mean_bare = [row[4] for row in plot_list]
MAC_meas_mean = [row[5] for row in plot_list]
SSA_meas_mean = [row[6] for row in plot_list]
MAC_meas_mean_bare = [row[7] for row in plot_list]
SSA_meas_mean_bare = [row[8] for row in plot_list]
mean_abse_calc = [row[9] for row in plot_list]
mean_abse_meas = [row[10] for row in plot_list]
fig = plt.figure(figsize=(10,10))
ax1 = plt.subplot2grid((2,2), (0,0), colspan=1)
ax2 = plt.subplot2grid((2,2), (0,1), colspan=1)
ax3 = plt.subplot2grid((2,2), (1,0), colspan=1)
ax1.plot(MAC_calc_mean,altitudes,marker='o',linestyle='-', color = 'b', label = 'coated rBC')
ax1.plot(MAC_calc_mean_bare,altitudes,marker='o',linestyle='--', color = 'b',alpha = 0.5, label = 'bare rBC')
#ax1.plot(MAC_meas_mean,altitudes,marker='o',linestyle='-', color = 'r', label = 'coated rBC')
#ax1.plot(MAC_meas_mean_bare,altitudes,marker='o',linestyle='--', color = 'r',alpha = 0.5, label = 'bare rBC')
ax1.set_ylabel('altitude (m)')
ax1.set_xlabel('MAC (m2/g)')
ax1.set_xlim(5,18)
ax1.set_ylim(0,5000)
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles, labels,loc=7)
ax2.plot(SSA_calc_mean,altitudes,marker='o',linestyle='-', color = 'b')
ax2.plot(SSA_calc_mean_bare,altitudes,marker='o',linestyle='--', color = 'b',alpha = 0.5)
#ax2.plot(SSA_meas_mean,altitudes,marker='o',linestyle='-', color = 'r')
#ax2.plot(SSA_meas_mean_bare,altitudes,marker='o',linestyle='--', color = 'r',alpha = 0.5)
ax2.set_xlabel('SSA')
ax2.set_ylabel('altitude (m)')
ax2.set_xlim(0.38,0.5)
ax2.set_ylim(0,5000)
#ax3.plot(SSA_calc_mean,altitudes,marker='o',linestyle='-', color = 'b')
#ax3.plot(SSA_calc_mean_bare,altitudes,marker='o',linestyle='--', color = 'b',alpha = 0.5)
ax3.plot(mean_abse_calc,altitudes,marker='o',linestyle='-', color = 'b')
#ax3.plot(mean_abse_meas,altitudes,marker='o',linestyle='-', color = 'r')
ax3.set_xlabel('absorption enhancement')
ax3.set_ylabel('altitude (m)')
ax3.set_xlim(1.3,1.7)
ax3.set_ylim(0,5000)
dir = 'C:/Users/Sarah Hanna/Documents/Data/Netcare/Spring 2015/'
os.chdir(dir)
if savefig == True:
plt.savefig('MAC SSA abs enhancement - Sc 1-7 full mass range.png', bbox_inches='tight')
plt.show()
| mit |
WmHHooper/aima-python | submissions/Marszalkowski/myKMeans.py | 1 | 3040 | from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
nbaPGData = [
[ 15.8, 8.2, 8.1, 1.7 ],
[ 25.4, 10.3, 10.1, 1.8 ],
[ 22.1, 5.6, 3.1, 1.1 ],
[ 16.7, 3.4, 3.7, 1.0 ],
[ 16.2, 6.9, 5.6, 1.1 ],
[ 13.1, 5.3, 4.6, 1.6 ],
[ 17.3, 4.8, 4.1, 0.8 ],
[ 17.7, 5.0, 3.8, 2.0 ],
[ 26.9, 6.6, 4.5, 1.1 ],
[ 14.2, 7.0, 3.0, 1.5 ],
[ 15.2, 5.2, 3.8, 1.0 ],
[ 19.4, 6.2, 3.1, 1.1 ],
[ 12.4, 5.3, 2.6, 1.3 ],
[ 12.7, 6.2, 4.3, 1.3 ],
[ 8.3, 8.2, 4.0, 1.1 ],
[ 24.4, 5.1, 3.8, 1.1 ],
[ 11.6, 4.4, 2.8, 1.0 ],
[ 10.0, 2.8, 2.7, 0.9 ],
[ 18.6, 7.9, 5.4, 1.7 ],
[ 12.6, 6.6, 3.2, 0.9 ],
[ 7.5, 5.6, 3.1, 0.6 ],
[ 26.4, 6.1, 5.1, 1.6 ],
[ 10.2, 7.2, 6.9, 1.7 ],
[ 8.1, 2.9, 5.7, 1.2 ],
[ 9.5, 3.2, 2.3, 0.7 ],
[ 14.6, 5.3, 2.8, 0.6 ],
[ 13.4, 6.0, 4.3, 2.0 ],
[ 7.8, 4.4, 1.8, 1.0 ],
[ 19.4, 9.6, 3.7, 1.4 ],
[ 15.3, 7.8, 4.0, 1.2 ],
[ 29.1, 11.2, 8.1, 1.5 ],
[ 31.6, 10.4, 10.7, 1.6 ],
[ 25.3, 6.6, 4.5, 1.8 ],
[ 23.2, 5.5, 3.9, 1.1 ],
[ 17.9, 6.3, 3.1, 0.9 ],
[ 23.1, 10.7, 4.2, 2.0 ],
[ 28.9, 5.9, 2.7, 0.9 ],
[ 27.0, 5.9, 4.9, 0.9 ],
[ 11.1, 9.1, 4.1, 1.7 ],
[ 20.3, 5.8, 3.8, 1.2 ],
[ 25.2, 5.8, 3.2, 1.2 ],
[ 20.5, 6.3, 3.5, 1.3 ],
[ 21.1, 6.3, 4.8, 1.4 ],
[ 13.2, 4.6, 2.2, 1.0 ],
[ 18.0, 4.4, 3.8, 0.7 ],
[ 10.1, 4.5, 1.8, 0.5 ],
[ 15.4, 7.3, 3.9, 1.5 ],
[ 18.1, 9.2, 5.0, 2.0 ],
[ 22.4, 7.0, 4.8, 1.5 ],
[ 15.6, 4.8, 3.5, 1.4 ],
[ 12.8, 6.5, 4.7, 1.1 ],
[ 7.6, 4.7, 1.9, 0.7 ],
[ 6.9, 6.6, 3.1, 1.7 ],
[ 14.5, 5.2, 2.2, 0.7 ],
[ 16.9, 4.2, 3.4, 1.0 ],
[ 11.0, 5.6, 2.3, 0.5 ],
[ 12.8, 2.7, 2.6, 1.1 ],
[ 7.8, 6.7, 5.1, 1.4 ],
[ 11.0, 3.9, 3.2, 0.7 ],
[ 20.9, 5.2, 4.4, 1.6 ],
[ 23.5, 10.4, 7.8, 2.0 ],
[ 16.9, 4.3, 7.7, 1.2 ],
[ 30.1, 6.7, 5.4, 2.1 ],
[ 18.8, 6.2, 3.0, 1.1 ],
[ 22.2, 6.2, 3.0, 1.1 ],
[ 15.7, 5.9, 2.7, 1.2 ],
[ 21.2, 6.4, 4.7, 2.1 ],
[ 19.9, 10.2, 4.9, 1.9 ],
[ 10.1, 8.7, 4.3, 2.1 ],
[ 25.1, 6.8, 4.0, 0.9 ],
[ 19.5, 10.0, 4.2, 2.1 ],
[ 12.1, 3.5, 4.0, 1.1 ],
[ 19.0, 4.6, 4.1, 1.1 ],
[ 7.6, 4.1, 3.2, 0.9 ],
[ 14.1, 5.8, 3.8, 1.0 ],
[ 11.9, 5.3, 2.4, 0.8 ],
[ 11.9, 11.7, 6.0, 2.0 ],
[ 10.7, 6.4, 3.6, 1.2 ],
[ 12.8, 5.5, 3.4, 1.0 ],
[ 16.4, 4.7, 3.4, 0.7 ],
[ 9.9, 3.4, 3.5, 1.3 ],
[ 14.1, 5.8, 2.9, 0.9 ],
[ 15.3, 6.1, 2.9, 1.2 ],
[ 19.6, 4.7, 3.0, 1.1 ],
[ 12.6, 6.5, 4.0, 3.4 ],
[ 13.2, 3.3, 3.4, 1.2 ],
[ 10.3, 5.4, 2.2, 0.5 ],
[ 15.6, 10.2, 4.3, 1.3 ],
[ 12.2, 6.4, 3.4, 1.5 ],
[ 17.6, 5.6, 4.0, 1.2 ],
[ 15.5, 7.9, 8.7, 1.4 ],
[ 15.9, 7.6, 3.0, 0.7 ],
[ 15.0, 6.0, 4.5, 1.3 ],
[ 9.0, 4.8, 2.3, 1.5 ],
[ 12.6, 2.3, 1.8, 0.7 ],
[ 27.1, 6.0, 5.3, 0.8 ],
[ 27.4, 6.3, 4.3, 1.3 ],
[ 21.5, 8.1, 3.6, 1.7 ],
[ 20.3, 6.6, 3.4, 1.2 ],
[ 17.5, 7.5, 4.0, 1.2 ],
[ 22.0, 6.4, 4.9, 1.9 ],
[ 17.5, 4.5, 4.3, 0.9 ],
[ 8.2, 4.5, 5.6, 1.0 ],
[ 16.0, 4.2, 2.7, 0.7 ],
[ 13.9, 3.9, 2.8, 1.2 ],
[ 6.7, 4.0, 4.0, 0.7 ],
[ 12.6, 7.6, 2.3, 1.3 ],
[ 7.5, 3.3, 2.6, 0.6 ],
]
normalized_data = preprocessing.normalize(nbaPGData)
Examples = {
'pgNotNormalized': {
'data': nbaPGData,
'k': [3, 2, 4],
},
'pgNormalized': {
'data': normalized_data,
'k': [2, 4, 3],
},
} | mit |
RomainBrault/scikit-learn | examples/decomposition/plot_sparse_coding.py | 60 | 4016 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It
therefore shows how much adding atoms of different widths matters, which
motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed so that both dictionaries stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
         * (1 - ((x - center) ** 2 / width ** 2))
         * np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
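# Each row of the returned dictionary is one wavelet atom with unit l2 norm;
# e.g. (values assumed for illustration) ricker_matrix(width=10,
# resolution=1024, n_components=16) returns a (16, 1024) array whose rows all
# satisfy np.allclose(np.sum(row ** 2), 1).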
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y.reshape(1, -1))
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| bsd-3-clause |
RAJSD2610/SDNopenflowSwitchAnalysis | ece671/FinalPlot.py | 1 | 17007 |
# coding: utf-8
# In[6]:
#TCP ftotal
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
seaborn.set()
path= os.path.expanduser("~/Desktop/ece671/ntcp3")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
t3=[]
i=0
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
while i<(num_files/2) :
# df+=[]
j=i+1
path ="/home/vetri/Desktop/ece671/ntcp3/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<=3:
y=0
t3.append(y)
i+=1
print(t3)
path= os.path.expanduser("~/Desktop/ece671/ntcp10")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t10=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/ntcp10/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<=3:
y=0
t10.append(y)
i+=1
print(t10)
path= os.path.expanduser("~/Desktop/ece671/ntcp8")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t8=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/ntcp8/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<=3:
y=0
t8.append(y)
i+=1
print(t8)
path= os.path.expanduser("~/Desktop/ece671/ntcp15")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t15=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/ntcp15/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<=3:
y=0
t15.append(y)
i+=1
print(t15)
#plt.figure(figsize=(4, 5))
plt.plot(list(range(1,len(t3)+1)),t3, '.-',label="tcpt3")
plt.plot(list(range(1,len(t8)+1)),t8, '.-',label="tcpt8")
plt.plot(list(range(1,len(t10)+1)),t10, '.-',label="tcp10")
plt.plot(list(range(1,len(t15)+1)),t15, '.-',label="tcpt15")
plt.title("Total Flows Present")
plt.xlabel("time(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
# In[7]:
#UDP ftotal
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
seaborn.set()
path= os.path.expanduser("~/Desktop/ece671/nudp3")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
u3=[]
i=0
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
while i<(num_files/2) :
# df+=[]
j=i+1
path ="/home/vetri/Desktop/ece671/nudp3/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
u3.append(y)
i+=1
print(u3)
path= os.path.expanduser("~/Desktop/ece671/nudp10")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
u10=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/nudp10/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
u10.append(y)
i+=1
print(u10)
path= os.path.expanduser("~/Desktop/ece671/nudp8")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
u8=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/nudp8/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
u8.append(y)
i+=1
print(u8)
path= os.path.expanduser("~/Desktop/ece671/nudp15")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
u15=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/nudp15/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
u15.append(y)
i+=1
print(u15)
#plt.figure(figsize=(4, 5))
plt.plot(list(range(1,len(u3)+1)),u3, '.-',label="udpt3")
plt.plot(list(range(1,len(u8)+1)),u8, '.-',label="udpt8")
plt.plot(list(range(1,len(u10)+1)),u10, '.-',label="udpt10")
plt.plot(list(range(1,len(u15)+1)),u15, '.-',label="udpt15")
plt.title("Total Flows Present")
plt.xlabel("time(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
# In[34]:
#TCP fpersec
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
seaborn.set()
path= os.path.expanduser("~/Desktop/ece671/ntcp3")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
t3=[]
i=0
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
while i<(num_files/2) :
# df+=[]
j=i+1
path ="/home/vetri/Desktop/ece671/ntcp3/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<=3:
y=0
t3.append(y)
i+=1
print(t3)
path= os.path.expanduser("~/Desktop/ece671/ntcp10")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t10=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/ntcp10/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<=3:
y=0
t10.append(y)
i+=1
print(t10)
path= os.path.expanduser("~/Desktop/ece671/ntcp8")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t8=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/ntcp8/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<=3:
y=0
t8.append(y)
i+=1
print(t8)
path= os.path.expanduser("~/Desktop/ece671/ntcp15")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t15=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/ntcp15/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<=3:
y=0
t15.append(y)
i+=1
print(t15)
#sum
s=[]
s3=sum(t3)
s.append(s3)
s8=sum(t8)
s.append(s8)
s10=sum(t10)
s.append(s10)
s15=sum(t15)
s.append(s15)
l=[3,8,10,15]
plt.plot(l,s, '.-',label="total flows")
plt.title("TCP traffic")
plt.xlabel("timeout(s)")
plt.ylabel("flows")
plt.legend()
plt.show()
#avg
av=[]
av3=s3/(len(t3))
av.append(av3)
av8=s8/(len(t8))
av.append(av8)
av10=s10/(len(t10))
av.append(av10)
av15=s15/(len(t15))
av.append(av15)
plt.plot(l,av, '.-',label="avg flows")
plt.title("TCP traffic")
plt.xlabel("timeout(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
#frequency
import collections
counter3=collections.Counter(t3)
counter8=collections.Counter(t8)
counter10=collections.Counter(t10)
counter15=collections.Counter(t15)
#frequency of 0
freq0_3=counter3[min(t3)]
freq0_8=counter8[min(t8)]
freq0_10=counter10[min(t10)]
freq0_15=counter15[min(t15)]
freq0=[freq0_3,freq0_8,freq0_10,freq0_15]
plt.plot(l,freq0, '.-',label="Frequency of 0 flows")
plt.title("TCP traffic")
plt.xlabel("timeout(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
#plt.figure(figsize=(4, 5))
plt.plot(list(range(1,len(t3)+1)),t3, '.-',label="tcpt3")
plt.plot(list(range(1,len(t8)+1)),t8, '.-',label="tcpt8")
plt.plot(list(range(1,len(t10)+1)),t10, '.-',label="tcp10")
plt.plot(list(range(1,len(t15)+1)),t15, '.-',label="tcpt15")
plt.title("Flows Programmed per second")
plt.xlabel("time(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
# In[32]:
#UDP fpersec
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
seaborn.set()
path= os.path.expanduser("~/Desktop/ece671/nudp3")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
u3=[]
i=0
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
while i<(num_files/2) :
# df+=[]
j=i+1
path ="/home/vetri/Desktop/ece671/nudp3/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<=1:
y=0
u3.append(y)
i+=1
print(u3)
path= os.path.expanduser("~/Desktop/ece671/nudp10")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
u10=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/nudp10/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<=1:
y=0
u10.append(y)
i+=1
print(u10)
path= os.path.expanduser("~/Desktop/ece671/nudp8")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
u8=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/nudp8/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<=1:
y=0
u8.append(y)
i+=1
print(u8)
path= os.path.expanduser("~/Desktop/ece671/nudp15")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
u15=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/nudp15/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<=1:
y=0
u15.append(y)
i+=1
print(u15)
#sum
s=[]
s3=sum(u3)
s.append(s3)
s8=sum(u8)
s.append(s8)
s10=sum(u10)
s.append(s10)
s15=sum(u15)
s.append(s15)
l=[3,8,10,15]
plt.plot(l,s, '.-',label="total flows")
plt.title("UDP traffic")
plt.xlabel("timeout(s)")
plt.ylabel("flows")
plt.legend()
plt.show()
#avg
av=[]
av3=s3/(len(u3))
av.append(av3)
av8=s8/(len(u8))
av.append(av8)
av10=s10/(len(u10))
av.append(av10)
av15=s15/(len(u15))
av.append(av15)
plt.plot(l,av, '.-',label="avg flows")
plt.title("UDP traffic")
plt.xlabel("timeout(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
#frequency
import collections
counter3=collections.Counter(u3)
counter8=collections.Counter(u8)
counter10=collections.Counter(u10)
counter15=collections.Counter(u15)
#frequency of 0
freq0_3=counter3[0]
freq0_8=counter8[0]
freq0_10=counter10[0]
freq0_15=counter15[0]
freq0=[freq0_3,freq0_8,freq0_10,freq0_15]
plt.plot(l,freq0, '.-',label="Frequency of 0 flows")
plt.title("UDP traffic")
plt.xlabel("timeout(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
#plt.figure(figsize=(4, 5))
plt.plot(list(range(1,len(u3)+1)),u3, '.-',label="udpt3")
plt.plot(list(range(1,len(u8)+1)),u8, '.-',label="udpt8")
plt.plot(list(range(1,len(u10)+1)),u10, '.-',label="udpt10")
plt.plot(list(range(1,len(u15)+1)),u15, '.-',label="udpt15")
plt.title("Flows Programmed per second")
plt.xlabel("time(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
# In[46]:
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
path= os.path.expanduser("~/Desktop/ece671/SwitchAnalysis/PlotDump")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
a=[]
df=[]
i=0
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
while i<(num_files/2) :
# df+=[]
j=i+1
path ="/home/vetri/Desktop/ece671/SwitchAnalysis/PlotDump/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
y-=2
if y<0:
y=0
a.append(y)
i+=1
print(a)
#plt.figure(figsize=(4, 5))
seaborn.barplot(x=list(range(1,len(a)+1)),y=a)
plt.title("Flows Programmed persec")
plt.xlabel("time(s)")
plt.ylabel("flows")
plt.show()
# In[16]:
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
path= os.path.expanduser("~/Desktop/ece671/udpt8")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
u8=[]
i=0
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
while i<(num_files/2) :
# df+=[]
j=i+1
path ="/home/vetri/Desktop/ece671/udpt8/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
u8.append(y)
i+=1
print(u8)
path= os.path.expanduser("~/Desktop/ece671/udpnone")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
u=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/udpnone/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
u.append(y)
i+=1
print(u)
path= os.path.expanduser("~/Desktop/ece671/tcpnone")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/tcpnone/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
t.append(y)
i+=1
print(t)
path= os.path.expanduser("~/Desktop/ece671/tcpt8")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t8=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/tcpt8/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
t8.append(y)
i+=1
print(t8)
#plt.figure(figsize=(4, 5))
#plt.figure(figsize=(4, 5))
plt.plot(list(range(1,len(u8)+1)),u8, '.-',label="udpt8")
plt.plot(list(range(1,len(u)+1)),u, '.-',label="udpnone")
plt.plot(list(range(1,len(t)+1)),t, '.-',label="tcpnone")
plt.plot(list(range(1,len(t8)+1)),t8, '.-',label="tcpt8")
plt.title("Flows Programmed per Sec")
plt.xlabel("time(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
# In[ ]:
| gpl-3.0 |
wazeerzulfikar/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 38 | 16445 | import sys
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import StringIO
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_components = 3
block = n_components * np.ones((3, 3))
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_components, X)
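# For reference, the dense form of X is a 9x9 block-diagonal count matrix (an
# illustration of the construction above, not used directly by the tests):
#   [[3 3 3 0 0 0 0 0 0]
#    [3 3 3 0 0 0 0 0 0]
#    [3 3 3 0 0 0 0 0 0]
#    [0 0 0 3 3 3 0 0 0]
#    ...                ]
# so documents 0-2 only contain words 0-2, documents 3-5 only words 3-5, etc.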
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_components, X = _build_sparse_mtx()
prior = 1. / n_components
lda_1 = LatentDirichletAllocation(n_components=n_components,
doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning_offset (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
evaluate_every=1, learning_method='batch',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=10., evaluate_every=1,
learning_method='online', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=10., total_samples=100,
random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_method='batch', random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_components = 3
lda = LatentDirichletAllocation(n_components=n_components,
random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
assert_array_almost_equal(np.sum(X_trans, axis=1),
np.ones(X_trans.shape[0]))
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_components=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_components = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=5., total_samples=20,
random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_components', LatentDirichletAllocation(n_components=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
# test pass dense matrix with sparse negative input.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_components = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_components=n_components,
random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_components, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2,
learning_method=method,
evaluate_every=1, random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_preplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_components = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=5., total_samples=20,
random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_components))
assert_raises_regexp(ValueError, r'Number of samples',
lda._perplexity_precomp_distr, X, invalid_n_samples)
# invalid topic number
invalid_n_components = rng.randint(4, size=(n_samples, n_components + 1))
assert_raises_regexp(ValueError, r'Number of topics',
lda._perplexity_precomp_distr, X,
invalid_n_components)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_components, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_components=n_components,
max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit(X)
perp_1 = lda_1.perplexity(X, sub_sampling=False)
lda_2.fit(X)
perp_2 = lda_2.perplexity(X, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_components, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_components=n_components,
max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
lda.fit(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X.toarray())
assert_almost_equal(perp_1, perp_2)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=10,
random_state=0)
lda.fit(X)
perplexity_1 = lda.perplexity(X, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_fit_perplexity():
# Test that the perplexity computed during fit is consistent with what is
# returned by the perplexity method
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch', random_state=0,
evaluate_every=1)
lda.fit(X)
# Perplexity computed at end of fit method
perplexity1 = lda.bound_
# Result of perplexity method on the train set
perplexity2 = lda.perplexity(X)
assert_almost_equal(perplexity1, perplexity2)
def test_doc_topic_distr_deprecation():
# Test that the appropriate warning message is displayed when a user
# attempts to pass the doc_topic_distr argument to the perplexity method
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr1 = lda.fit_transform(X)
distr2 = None
assert_warns(DeprecationWarning, lda.perplexity, X, distr1)
assert_warns(DeprecationWarning, lda.perplexity, X, distr2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
def check_verbosity(verbose, evaluate_every, expected_lines,
expected_perplexities):
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=3,
learning_method='batch',
verbose=verbose,
evaluate_every=evaluate_every,
random_state=0)
out = StringIO()
old_out, sys.stdout = sys.stdout, out
try:
lda.fit(X)
finally:
sys.stdout = old_out
n_lines = out.getvalue().count('\n')
n_perplexity = out.getvalue().count('perplexity')
assert_equal(expected_lines, n_lines)
assert_equal(expected_perplexities, n_perplexity)
def test_verbosity():
for verbose, evaluate_every, expected_lines, expected_perplexities in [
(False, 1, 0, 0),
(False, 0, 0, 0),
(True, 0, 3, 0),
(True, 1, 3, 3),
(True, 2, 3, 1),
]:
yield (check_verbosity, verbose, evaluate_every, expected_lines,
expected_perplexities)
def test_lda_n_topics_deprecation():
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=10, learning_method='batch')
assert_warns(DeprecationWarning, lda.fit, X)
| bsd-3-clause |
daeilkim/refinery | refinery/fact_classifier/classify_ex.py | 1 | 1262 | from sklearn import svm
from sklearn.feature_extraction import DictVectorizer
from collections import defaultdict
import pickle
v = DictVectorizer()
#TODO : need to tokenize the words before using them as features!
def main():
def munge(s):
ps = s.split()
label = int(ps[0])
ws = defaultdict(int)
for w in ps[1:]:
ws[w] += 1
return [label,ws]
data = [munge(l.strip()) for l in open("/home/chonger/Downloads/annotations.txt")]
labels = [x[0] for x in data]
dicts = [x[1] for x in data]
feats = v.fit_transform(dicts)
ttsplit = int(len(labels) * .8)
clf = svm.SVC(kernel='linear', class_weight={1: 10})
#clf = svm.SVC()
clf.fit(feats[:ttsplit],labels[:ttsplit])
print clf.score(feats[ttsplit:],labels[ttsplit:])
tot = defaultdict(int)
tr = defaultdict(int)
for ex in labels[ttsplit:]:
tr[ex] += 1
for ex in feats[ttsplit:]:
tot[(clf.predict(ex).tolist())[0]] += 1
print tr
print tot
print feats[0]
print feats[1]
f = open("/home/chonger/factsvm",'w')
pickle.dump(clf,f)
f.close()
f = open("/home/chonger/factfeat",'w')
pickle.dump(v,f)
f.close()
if __name__ == "__main__":
main()
| mit |
jonyroda97/redbot-amigosprovaveis | lib/matplotlib/testing/jpl_units/StrConverter.py | 23 | 5293 | #===========================================================================
#
# StrConverter
#
#===========================================================================
"""StrConverter module containing class StrConverter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import matplotlib.units as units
from matplotlib.cbook import iterable
# Place all imports before here.
#===========================================================================
__all__ = [ 'StrConverter' ]
#===========================================================================
class StrConverter( units.ConversionInterface ):
""": A matplotlib converter class. Provides matplotlib conversion
functionality for string data values.
Valid units for string are:
- 'indexed' : Values are indexed as they are specified for plotting.
- 'sorted' : Values are sorted alphanumerically.
- 'inverted' : Values are inverted so that the first value is on top.
- 'sorted-inverted' : A combination of 'sorted' and 'inverted'
"""
#------------------------------------------------------------------------
@staticmethod
def axisinfo( unit, axis ):
""": Returns information on how to handle an axis that has string data.
= INPUT VARIABLES
- axis The axis using this converter.
      - unit    The units to use for an axis with string data.
= RETURN VALUE
- Returns a matplotlib AxisInfo data structure that contains
minor/major formatters, major/minor locators, and default
label information.
"""
return None
#------------------------------------------------------------------------
@staticmethod
def convert( value, unit, axis ):
""": Convert value using unit to a float. If value is a sequence, return
the converted sequence.
= INPUT VARIABLES
- axis The axis using this converter.
- value The value or list of values that need to be converted.
      - unit    The units to use for an axis with string data.
= RETURN VALUE
- Returns the value parameter converted to floats.
"""
if ( units.ConversionInterface.is_numlike( value ) ):
return value
if ( value == [] ):
return []
# we delay loading to make matplotlib happy
ax = axis.axes
if axis is ax.get_xaxis():
isXAxis = True
else:
isXAxis = False
axis.get_major_ticks()
ticks = axis.get_ticklocs()
labels = axis.get_ticklabels()
labels = [ l.get_text() for l in labels if l.get_text() ]
if ( not labels ):
ticks = []
labels = []
if ( not iterable( value ) ):
value = [ value ]
newValues = []
for v in value:
if ( (v not in labels) and (v not in newValues) ):
newValues.append( v )
for v in newValues:
if ( labels ):
labels.append( v )
else:
labels = [ v ]
#DISABLED: This is disabled because matplotlib bar plots do not
#DISABLED: recalculate the unit conversion of the data values
#DISABLED: this is due to design and is not really a bug.
#DISABLED: If this gets changed, then we can activate the following
#DISABLED: block of code. Note that this works for line plots.
#DISABLED if ( unit ):
#DISABLED if ( unit.find( "sorted" ) > -1 ):
#DISABLED labels.sort()
#DISABLED if ( unit.find( "inverted" ) > -1 ):
#DISABLED labels = labels[ ::-1 ]
# add padding (so they do not appear on the axes themselves)
labels = [ '' ] + labels + [ '' ]
ticks = list(xrange( len(labels) ))
ticks[0] = 0.5
ticks[-1] = ticks[-1] - 0.5
axis.set_ticks( ticks )
axis.set_ticklabels( labels )
# we have to do the following lines to make ax.autoscale_view work
loc = axis.get_major_locator()
loc.set_bounds( ticks[0], ticks[-1] )
if ( isXAxis ):
ax.set_xlim( ticks[0], ticks[-1] )
else:
ax.set_ylim( ticks[0], ticks[-1] )
result = []
for v in value:
# If v is not in labels then something went wrong with adding new
# labels to the list of old labels.
errmsg = "This is due to a logic error in the StrConverter class. "
errmsg += "Please report this error and its message in bugzilla."
assert ( v in labels ), errmsg
result.append( ticks[ labels.index(v) ] )
ax.viewLim.ignore(-1)
return result
#------------------------------------------------------------------------
@staticmethod
def default_units( value, axis ):
""": Return the default unit for value, or None.
= INPUT VARIABLES
- axis The axis using this converter.
- value The value or list of values that need units.
= RETURN VALUE
- Returns the default units to use for value.
Return the default unit for value, or None.
"""
# The default behavior for string indexing.
return "indexed"
| gpl-3.0 |
hrantzsch/signature-verification | tools/ProbitScale.py | 1 | 2228 | # Credit to gnilson
# https://github.com/gnilson/ProbitScale
#
import matplotlib.scale as mscale
import matplotlib.transforms as mtransforms
from matplotlib.ticker import FormatStrFormatter
from scipy.stats import norm
import matplotlib.ticker as ticker
from numpy import *
class ProbitScale (mscale.ScaleBase):
name = 'probit'
def __init__(self, axis, **kwargs):
mscale.ScaleBase.__init__(self)
return
def get_transform(self):
return ProbitScale.ProbitTransform()
def limit_range_for_scale(self, vmin, vmax, minpos):
# return 0.001, 0.999
# return 0.001, 0.6
return max(vmin, 0.001), min(vmax, 0.999)
def set_default_locators_and_formatters(self, axis):
axis.set_major_locator(ticker.FixedLocator(
array([0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5,
0.6, 0.7, 0.8, 0.9, 0.99, 0.999])))
axis.set_major_formatter(FormatStrFormatter('%g'))
return
class ProbitTransform(mtransforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self):
mtransforms.Transform.__init__(self)
return
def transform(self, a):
masked = ma.masked_where((a <= 0) | (a >= 1), a)
if masked.mask.any():
for i in arange(0, len(masked)):
masked[i] = norm.ppf(masked[i]) if (
(masked[i] < 1) and (masked[i] > 0)) else masked[i]
return masked
return norm.ppf(a)
# for matplotlib 1.3.1
def transform_non_affine(self, a):
return self.transform(a)
def inverted(self):
return ProbitScale.CDFTransform()
class CDFTransform(mtransforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self):
mtransforms.Transform.__init__(self)
return
def transform(self, a):
return norm.cdf(a)
# for matplotlib 1.3.1
def transform_non_affine(self, a):
return self.transform(a)
def inverted(self):
return ProbitScale.ProbitTransform()
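# Illustrative usage sketch (assumes matplotlib.pyplot is imported as plt; the data
# names are placeholders): the scale must be registered before 'probit' can be used.
#
#     mscale.register_scale(ProbitScale)
#     fig, ax = plt.subplots()
#     ax.plot(quantiles, probabilities)
#     ax.set_yscale('probit')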
| gpl-3.0 |
PMBio/warpedLMM | warpedlmm/util/linalg.py | 2 | 19589 | # Copyright (c) 2012, GPy authors (see https://github.com/sheffieldml/gpy/AUTHORS.txt).
# Licensed under the BSD 3-clause license (see https://github.com/sheffieldml/gp/LICENSE.txt)
# tdot function courtesy of Ian Murray:
# Iain Murray, April 2013. iain contactable via iainmurray.net
# http://homepages.inf.ed.ac.uk/imurray2/code/tdot/tdot.py
# link to gpy repository (original source of this file): https://github.com/sheffieldml/gpy
import numpy as np
from scipy import linalg, weave
import types
import ctypes
from ctypes import byref, c_char, c_int, c_double # TODO
# import scipy.lib.lapack
import scipy
import warnings
import os
import scipy.linalg
import logging
logger = logging.getLogger(__name__)
anaconda = False
MKL = False
_scipyversion = np.float64((scipy.__version__).split('.')[:2])
_fix_dpotri_scipy_bug = True
if np.all(_scipyversion >= np.array([0, 14])):
from scipy.linalg import lapack
_fix_dpotri_scipy_bug = False
elif np.all(_scipyversion >= np.array([0, 12])):
#import scipy.linalg.lapack.clapack as lapack
from scipy.linalg import lapack
else:
from scipy.linalg.lapack import flapack as lapack
# if True:#config.getboolean('anaconda', 'installed') and config.getboolean('anaconda', 'MKL'):
# try:
# anaconda_path = str(config.get('anaconda', 'location'))
# mkl_rt = ctypes.cdll.LoadLibrary(os.path.join(anaconda_path, 'DLLs', 'mkl_rt.dll'))
# dsyrk = mkl_rt.dsyrk
# dsyr = mkl_rt.dsyr
# _blas_available = True
# # set a couple of variables that can be useful for debugging purposes
# anaconda = True
# MKL = True
# except:
# _blas_available = False
# else:
# try:
# _blaslib = ctypes.cdll.LoadLibrary(np.core._dotblas.__file__) # @UndefinedVariable
# dsyrk = _blaslib.dsyrk_
# dsyr = _blaslib.dsyr_
# _blas_available = True
# except AttributeError as e:
# _blas_available = False
# warnings.warn("warning: caught this exception:" + str(e))
_blas_available = False # TODO fix config
def force_F_ordered_symmetric(A):
"""
    return an F ordered version of A, assuming A is symmetric
"""
if A.flags['F_CONTIGUOUS']:
return A
if A.flags['C_CONTIGUOUS']:
return A.T
else:
return np.asfortranarray(A)
def force_F_ordered(A):
"""
    return an F ordered version of A, assuming A is triangular
"""
if A.flags['F_CONTIGUOUS']:
return A
print "why are your arrays not F order?"
return np.asfortranarray(A)
# def jitchol(A, maxtries=5):
# A = force_F_ordered_symmetric(A)
# L, info = lapack.dpotrf(A, lower=1)
# if info == 0:
# return L
# else:
# if maxtries==0:
# raise linalg.LinAlgError, "not positive definite, even with jitter."
# diagA = np.diag(A)
# if np.any(diagA <= 0.):
# raise linalg.LinAlgError, "not pd: non-positive diagonal elements"
# jitter = diagA.mean() * 1e-6
# return jitchol(A+np.eye(A.shape[0])*jitter, maxtries-1)
def jitchol(A, maxtries=5):
A = np.ascontiguousarray(A)
L, info = lapack.dpotrf(A, lower=1)
if info == 0:
return L
else:
diagA = np.diag(A)
if np.any(diagA <= 0.):
raise linalg.LinAlgError, "not pd: non-positive diagonal elements"
jitter = diagA.mean() * 1e-6
        while maxtries > 0 and np.isfinite(jitter):
            try:
                L = linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True)
                # factorisation succeeded: warn about the jitter that had to be added
                logging.warning('Added jitter of {:.10e} to achieve positive definiteness'.format(jitter))
                return L
            except:
                jitter *= 10
            finally:
                maxtries -= 1
        raise linalg.LinAlgError, "not positive definite, even with jitter."
# def dtrtri(L, lower=1):
# """
# Wrapper for lapack dtrtri function
# Inverse of L
#
# :param L: Triangular Matrix L
# :param lower: is matrix lower (true) or upper (false)
# :returns: Li, info
# """
# L = force_F_ordered(L)
# return lapack.dtrtri(L, lower=lower)
def dtrtrs(A, B, lower=1, trans=0, unitdiag=0):
"""
Wrapper for lapack dtrtrs function
DTRTRS solves a triangular system of the form
A * X = B or A**T * X = B,
where A is a triangular matrix of order N, and B is an N-by-NRHS
matrix. A check is made to verify that A is nonsingular.
:param A: Matrix A(triangular)
:param B: Matrix B
:param lower: is matrix lower (true) or upper (false)
:returns: Solution to A * X = B or A**T * X = B
"""
A = np.asfortranarray(A)
#Note: B does not seem to need to be F ordered!
return lapack.dtrtrs(A, B, lower=lower, trans=trans, unitdiag=unitdiag)
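# Illustrative example (K and y are hypothetical): with L the lower Cholesky factor
# of a positive definite K, two triangular solves give alpha = K^{-1} y without
# explicitly inverting K.
#
#     L = jitchol(K)
#     tmp, _ = dtrtrs(L, y, lower=1, trans=0)      # solves L tmp = y
#     alpha, _ = dtrtrs(L, tmp, lower=1, trans=1)  # solves L^T alpha = tmp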
def dpotrs(A, B, lower=1):
"""
Wrapper for lapack dpotrs function
:param A: Matrix A
:param B: Matrix B
:param lower: is matrix lower (true) or upper (false)
:returns:
"""
A = force_F_ordered(A)
return lapack.dpotrs(A, B, lower=lower)
def dpotri(A, lower=1):
"""
Wrapper for lapack dpotri function
DPOTRI - compute the inverse of a real symmetric positive
definite matrix A using the Cholesky factorization A =
U**T*U or A = L*L**T computed by DPOTRF
:param A: Matrix A
:param lower: is matrix lower (true) or upper (false)
:returns: A inverse
"""
if _fix_dpotri_scipy_bug:
assert lower==1, "scipy linalg behaviour is very weird. please use lower, fortran ordered arrays"
lower = 0
A = force_F_ordered(A)
R, info = lapack.dpotri(A, lower=lower) #needs to be zero here, seems to be a scipy bug
symmetrify(R)
return R, info
def pddet(A):
"""
Determinant of a positive definite matrix, only symmetric matricies though
"""
L = jitchol(A)
logdetA = 2*sum(np.log(np.diag(L)))
return logdetA
def trace_dot(a, b):
"""
Efficiently compute the trace of the matrix product of a and b
"""
return np.sum(a * b)
def mdot(*args):
"""
Multiply all the arguments using matrix product rules.
The output is equivalent to multiplying the arguments one by one
from left to right using dot().
Precedence can be controlled by creating tuples of arguments,
    for instance mdot(a,((b,c),d)) computes a*((b*c)*d).
Note that this means the output of dot(a,b) and mdot(a,b) will differ if
a or b is a pure tuple of numbers.
"""
if len(args) == 1:
return args[0]
elif len(args) == 2:
return _mdot_r(args[0], args[1])
else:
return _mdot_r(args[:-1], args[-1])
def _mdot_r(a, b):
"""Recursive helper for mdot"""
if type(a) == types.TupleType:
if len(a) > 1:
a = mdot(*a)
else:
a = a[0]
if type(b) == types.TupleType:
if len(b) > 1:
b = mdot(*b)
else:
b = b[0]
return np.dot(a, b)
def pdinv(A, *args):
"""
:param A: A DxD pd numpy array
:rval Ai: the inverse of A
:rtype Ai: np.ndarray
:rval L: the Cholesky decomposition of A
:rtype L: np.ndarray
:rval Li: the Cholesky decomposition of Ai
:rtype Li: np.ndarray
:rval logdet: the log of the determinant of A
:rtype logdet: float64
"""
L = jitchol(A, *args)
logdet = 2.*np.sum(np.log(np.diag(L)))
Li = dtrtri(L)
Ai, _ = dpotri(L, lower=1)
# Ai = np.tril(Ai) + np.tril(Ai,-1).T
symmetrify(Ai)
return Ai, L, Li, logdet
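# Illustrative example (K is a hypothetical positive definite kernel matrix):
#
#     Ai, L, Li, logdet = pdinv(K)
#     # Ai = K^{-1}, L = lower Cholesky factor of K, Li = L^{-1}, logdet = log|K|,
#     # e.g. a Gaussian log-likelihood term:
#     # -0.5 * (N * np.log(2. * np.pi) + logdet + y.T.dot(Ai).dot(y))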
def dtrtri(L):
"""
Inverts a Cholesky lower triangular matrix
:param L: lower triangular matrix
:rtype: inverse of L
"""
L = force_F_ordered(L)
return lapack.dtrtri(L, lower=1)[0]
def multiple_pdinv(A):
"""
:param A: A DxDxN numpy array (each A[:,:,i] is pd)
:rval invs: the inverses of A
:rtype invs: np.ndarray
:rval hld: 0.5* the log of the determinants of A
:rtype hld: np.array
"""
N = A.shape[-1]
chols = [jitchol(A[:, :, i]) for i in range(N)]
halflogdets = [np.sum(np.log(np.diag(L[0]))) for L in chols]
invs = [dpotri(L[0], True)[0] for L in chols]
invs = [np.triu(I) + np.triu(I, 1).T for I in invs]
return np.dstack(invs), np.array(halflogdets)
def pca(Y, input_dim):
"""
Principal component analysis: maximum likelihood solution by SVD
:param Y: NxD np.array of data
:param input_dim: int, dimension of projection
:rval X: - Nxinput_dim np.array of dimensionality reduced data
:rval W: - input_dimxD mapping from X to Y
"""
if not np.allclose(Y.mean(axis=0), 0.0):
print "Y is not zero mean, centering it locally (GPy.util.linalg.pca)"
# Y -= Y.mean(axis=0)
Z = linalg.svd(Y - Y.mean(axis=0), full_matrices=False)
[X, W] = [Z[0][:, 0:input_dim], np.dot(np.diag(Z[1]), Z[2]).T[:, 0:input_dim]]
v = X.std(axis=0)
X /= v;
W *= v;
return X, W.T
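# Illustrative example (Y is a hypothetical NxD data array):
#
#     X, W = pca(Y, 2)        # X: Nx2 scores, W: 2xD loadings
#     Y_hat = np.dot(X, W)    # approximately reconstructs Y - Y.mean(axis=0)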
def ppca(Y, Q, iterations=100):
"""
EM implementation for probabilistic pca.
:param array-like Y: Observed Data
:param int Q: Dimensionality for reduced array
:param int iterations: number of iterations for EM
"""
from numpy.ma import dot as madot
N, D = Y.shape
# Initialise W randomly
W = np.random.randn(D, Q) * 1e-3
Y = np.ma.masked_invalid(Y, copy=0)
mu = Y.mean(0)
Ycentered = Y - mu
try:
for _ in range(iterations):
exp_x = np.asarray_chkfinite(np.linalg.solve(W.T.dot(W), madot(W.T, Ycentered.T))).T
W = np.asarray_chkfinite(np.linalg.solve(exp_x.T.dot(exp_x), madot(exp_x.T, Ycentered))).T
except np.linalg.linalg.LinAlgError:
#"converged"
pass
return np.asarray_chkfinite(exp_x), np.asarray_chkfinite(W)
def ppca_missing_data_at_random(Y, Q, iters=100):
"""
EM implementation of Probabilistic pca for when there is missing data.
Taken from <SheffieldML, https://github.com/SheffieldML>
.. math:
\\mathbf{Y} = \mathbf{XW} + \\epsilon \\text{, where}
\\epsilon = \\mathcal{N}(0, \\sigma^2 \mathbf{I})
:returns: X, W, sigma^2
"""
from numpy.ma import dot as madot
import diag
from GPy.util.subarray_and_sorting import common_subarrays
import time
debug = 1
# Initialise W randomly
N, D = Y.shape
W = np.random.randn(Q, D) * 1e-3
Y = np.ma.masked_invalid(Y, copy=1)
nu = 1.
#num_obs_i = 1./Y.count()
Ycentered = Y - Y.mean(0)
X = np.zeros((N,Q))
cs = common_subarrays(Y.mask)
cr = common_subarrays(Y.mask, 1)
Sigma = np.zeros((N, Q, Q))
Sigma2 = np.zeros((N, Q, Q))
mu = np.zeros(D)
"""
if debug:
import matplotlib.pyplot as pylab
fig = pylab.figure("FIT MISSING DATA");
ax = fig.gca()
ax.cla()
lines = pylab.plot(np.zeros((N,Q)).dot(W))
"""
W2 = np.zeros((Q,D))
for i in range(iters):
# Sigma = np.linalg.solve(diag.add(madot(W,W.T), nu), diag.times(np.eye(Q),nu))
# exp_x = madot(madot(Ycentered, W.T),Sigma)/nu
# Ycentered = (Y - exp_x.dot(W).mean(0))
# #import ipdb;ipdb.set_trace()
# #Ycentered = mu
# W = np.linalg.solve(madot(exp_x.T,exp_x) + Sigma, madot(exp_x.T, Ycentered))
# nu = (((Ycentered - madot(exp_x, W))**2).sum(0) + madot(W.T,madot(Sigma,W)).sum(0)).sum()/N
for csi, (mask, index) in enumerate(cs.iteritems()):
mask = ~np.array(mask)
Sigma2[index, :, :] = nu * np.linalg.inv(diag.add(W2[:,mask].dot(W2[:,mask].T), nu))
#X[index,:] = madot((Sigma[csi]/nu),madot(W,Ycentered[index].T))[:,0]
X2 = ((Sigma2/nu) * (madot(Ycentered,W2.T).base)[:,:,None]).sum(-1)
mu2 = (Y - X.dot(W)).mean(0)
for n in range(N):
Sigma[n] = nu * np.linalg.inv(diag.add(W[:,~Y.mask[n]].dot(W[:,~Y.mask[n]].T), nu))
X[n, :] = (Sigma[n]/nu).dot(W[:,~Y.mask[n]].dot(Ycentered[n,~Y.mask[n]].T))
for d in range(D):
mu[d] = (Y[~Y.mask[:,d], d] - X[~Y.mask[:,d]].dot(W[:, d])).mean()
Ycentered = (Y - mu)
nu3 = 0.
for cri, (mask, index) in enumerate(cr.iteritems()):
mask = ~np.array(mask)
W2[:,index] = np.linalg.solve(X[mask].T.dot(X[mask]) + Sigma[mask].sum(0), madot(X[mask].T, Ycentered[mask,index]))[:,None]
W2[:,index] = np.linalg.solve(X.T.dot(X) + Sigma.sum(0), madot(X.T, Ycentered[:,index]))
#nu += (((Ycentered[mask,index] - X[mask].dot(W[:,index]))**2).sum(0) + W[:,index].T.dot(Sigma[mask].sum(0).dot(W[:,index])).sum(0)).sum()
nu3 += (((Ycentered[index] - X.dot(W[:,index]))**2).sum(0) + W[:,index].T.dot(Sigma.sum(0).dot(W[:,index])).sum(0)).sum()
nu3 /= N
nu = 0.
nu2 = 0.
W = np.zeros((Q,D))
for j in range(D):
W[:,j] = np.linalg.solve(X[~Y.mask[:,j]].T.dot(X[~Y.mask[:,j]]) + Sigma[~Y.mask[:,j]].sum(0), madot(X[~Y.mask[:,j]].T, Ycentered[~Y.mask[:,j],j]))
nu2f = np.tensordot(W[:,j].T, Sigma[~Y.mask[:,j],:,:], [0,1]).dot(W[:,j])
nu2s = W[:,j].T.dot(Sigma[~Y.mask[:,j],:,:].sum(0).dot(W[:,j]))
nu2 += (((Ycentered[~Y.mask[:,j],j] - X[~Y.mask[:,j],:].dot(W[:,j]))**2) + nu2f).sum()
for i in range(N):
if not Y.mask[i,j]:
nu += ((Ycentered[i,j] - X[i,:].dot(W[:,j]))**2) + W[:,j].T.dot(Sigma[i,:,:].dot(W[:,j]))
nu /= N
nu2 /= N
nu4 = (((Ycentered - X.dot(W))**2).sum(0) + W.T.dot(Sigma.sum(0).dot(W)).sum(0)).sum()/N
        # import ipdb; ipdb.set_trace()
"""
if debug:
#print Sigma[0]
print "nu:", nu, "sum(X):", X.sum()
pred_y = X.dot(W)
for x, l in zip(pred_y.T, lines):
l.set_ydata(x)
ax.autoscale_view()
ax.set_ylim(pred_y.min(), pred_y.max())
fig.canvas.draw()
time.sleep(.3)
"""
return np.asarray_chkfinite(X), np.asarray_chkfinite(W), nu
def tdot_numpy(mat, out=None):
return np.dot(mat, mat.T, out)
def tdot_blas(mat, out=None):
"""returns np.dot(mat, mat.T), but faster for large 2D arrays of doubles."""
if (mat.dtype != 'float64') or (len(mat.shape) != 2):
return np.dot(mat, mat.T)
nn = mat.shape[0]
if out is None:
out = np.zeros((nn, nn))
else:
assert(out.dtype == 'float64')
assert(out.shape == (nn, nn))
# FIXME: should allow non-contiguous out, and copy output into it:
assert(8 in out.strides)
# zeroing needed because of dumb way I copy across triangular answer
out[:] = 0.0
# # Call to DSYRK from BLAS
# If already in Fortran order (rare), and has the right sorts of strides I
# could avoid the copy. I also thought swapping to cblas API would allow use
# of C order. However, I tried that and had errors with large matrices:
# http://homepages.inf.ed.ac.uk/imurray2/code/tdot/tdot_broken.py
mat = np.asfortranarray(mat)
TRANS = c_char('n')
N = c_int(mat.shape[0])
K = c_int(mat.shape[1])
LDA = c_int(mat.shape[0])
UPLO = c_char('l')
ALPHA = c_double(1.0)
A = mat.ctypes.data_as(ctypes.c_void_p)
BETA = c_double(0.0)
C = out.ctypes.data_as(ctypes.c_void_p)
LDC = c_int(np.max(out.strides) / 8)
dsyrk(byref(UPLO), byref(TRANS), byref(N), byref(K),
byref(ALPHA), A, byref(LDA), byref(BETA), C, byref(LDC))
symmetrify(out, upper=True)
return np.ascontiguousarray(out)
def tdot(*args, **kwargs):
if _blas_available:
return tdot_blas(*args, **kwargs)
else:
return tdot_numpy(*args, **kwargs)
def DSYR_blas(A, x, alpha=1.):
"""
Performs a symmetric rank-1 update operation:
A <- A + alpha * np.dot(x,x.T)
:param A: Symmetric NxN np.array
:param x: Nx1 np.array
:param alpha: scalar
"""
N = c_int(A.shape[0])
LDA = c_int(A.shape[0])
UPLO = c_char('l')
ALPHA = c_double(alpha)
A_ = A.ctypes.data_as(ctypes.c_void_p)
x_ = x.ctypes.data_as(ctypes.c_void_p)
INCX = c_int(1)
dsyr(byref(UPLO), byref(N), byref(ALPHA),
x_, byref(INCX), A_, byref(LDA))
symmetrify(A, upper=True)
def DSYR_numpy(A, x, alpha=1.):
"""
Performs a symmetric rank-1 update operation:
A <- A + alpha * np.dot(x,x.T)
:param A: Symmetric NxN np.array
:param x: Nx1 np.array
:param alpha: scalar
"""
A += alpha * np.dot(x[:, None], x[None, :])
def DSYR(*args, **kwargs):
if _blas_available:
return DSYR_blas(*args, **kwargs)
else:
return DSYR_numpy(*args, **kwargs)
def symmetrify(A, upper=False):
"""
    Take the square matrix A and make it symmetrical by copying elements from the lower half to the upper
works IN PLACE.
"""
triu = np.triu_indices_from(A,k=1)
if upper:
A.T[triu] = A[triu]
else:
A[triu] = A.T[triu]
# N, M = A.shape
# assert N == M
# from .cython import linalg as c_linalg
# return c_linalg.symmetrify(A, N=N, upper=upper)
# c_contig_code = """
# int iN;
# for (int i=1; i<N; i++){
# iN = i*N;
# for (int j=0; j<i; j++){
# A[i+j*N] = A[iN+j];
# }
# }
# """
# f_contig_code = """
# int iN;
# for (int i=1; i<N; i++){
# iN = i*N;
# for (int j=0; j<i; j++){
# A[iN+j] = A[i+j*N];
# }
# }
# """
# N = int(N) # for safe type casting
# if A.flags['C_CONTIGUOUS'] and upper:
# weave.inline(f_contig_code, ['A', 'N'], extra_compile_args=['-O3'])
# elif A.flags['C_CONTIGUOUS'] and not upper:
# weave.inline(c_contig_code, ['A', 'N'], extra_compile_args=['-O3'])
# elif A.flags['F_CONTIGUOUS'] and upper:
# weave.inline(c_contig_code, ['A', 'N'], extra_compile_args=['-O3'])
# elif A.flags['F_CONTIGUOUS'] and not upper:
# weave.inline(f_contig_code, ['A', 'N'], extra_compile_args=['-O3'])
# else:
# if upper:
# tmp = np.tril(A.T)
# else:
# tmp = np.tril(A)
# A[:] = 0.0
# A += tmp
# A += np.tril(tmp, -1).T
def symmetrify_murray(A):
A += A.T
nn = A.shape[0]
A[[range(nn), range(nn)]] /= 2.0
def cholupdate(L, x):
"""
update the LOWER cholesky factor of a pd matrix IN PLACE
if L is the lower chol. of K, then this function computes L\_
where L\_ is the lower chol of K + x*x^T
"""
support_code = """
#include <math.h>
"""
code = """
double r,c,s;
int j,i;
for(j=0; j<N; j++){
r = sqrt(L(j,j)*L(j,j) + x(j)*x(j));
c = r / L(j,j);
s = x(j) / L(j,j);
L(j,j) = r;
for (i=j+1; i<N; i++){
L(i,j) = (L(i,j) + s*x(i))/c;
x(i) = c*x(i) - s*L(i,j);
}
}
"""
x = x.copy()
N = x.size
weave.inline(code, support_code=support_code, arg_names=['N', 'L', 'x'], type_converters=weave.converters.blitz)
def backsub_both_sides(L, X, transpose='left'):
""" Return L^-T * X * L^-1, assumuing X is symmetrical and L is lower cholesky"""
if transpose == 'left':
tmp, _ = dtrtrs(L, X, lower=1, trans=1)
return dtrtrs(L, tmp.T, lower=1, trans=1)[0].T
else:
tmp, _ = dtrtrs(L, X, lower=1, trans=0)
return dtrtrs(L, tmp.T, lower=1, trans=0)[0].T
| apache-2.0 |
Scan-o-Matic/scanomatic | setup.py | 1 | 2375 | #!/usr/bin/env python
from __future__ import absolute_import
import os
from setuptools import find_packages, setup
from scanomatic import get_version
setup(
name="Scan-o-Matic",
version=get_version(),
description="High Throughput Solid Media Image Phenotyping Platform",
long_description="""Scan-o-Matic is a high precision phenotyping platform
that uses scanners to obtain images of yeast colonies growing on solid
substrate.
The package contains a user interface as well as an extensive package
for yeast colony analysis from scanned images.
""",
author="Martin Zackrisson",
author_email="[email protected]",
url="https://github.com/Scan-o-Matic/scanomatic",
packages=find_packages(include=['scanomatic*']),
package_data={
"scanomatic": [
'ui_server_data/*.html',
'ui_server_data/js/*.js',
'ui_server_data/js/external/*.js',
'ui_server_data/style/*.css',
'ui_server_data/fonts/*',
'ui_server/templates/*',
'images/*',
'util/birds.txt',
'util/adjectives.txt',
],
'scanomatic.data': [
'migrations/env.py',
'migrations/alembic.ini',
'migrations/versions/*.py',
],
},
scripts=[
os.path.join("scripts", p) for p in [
"scan-o-matic_migrate",
"scan-o-matic_server",
]
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: X11 Application :: GTK',
'Environment :: Console',
        'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Bio-Informatics'
],
install_requires=[
'alembic',
'chardet',
'enum34',
'flask',
'flask-restful',
'future',
'matplotlib',
'numpy',
'pandas',
'pillow',
'prometheus-client',
'psutil',
'psycopg2-binary',
'pytz',
'requests',
'scikit-image',
'scipy',
'setproctitle',
'sqlalchemy',
'xlrd',
],
)
| gpl-3.0 |
oczkers/fut | docs/core.py | 2 | 8519 | # -*- coding: utf-8 -*-
"""
futmarket.core
~~~~~~~~~~~~~~~~~~~~~
This module implements the futmarket's basic methods.
"""
# Imports
## Relies heavily on fut package.
import fut
import pandas as pd
from time import sleep
# Login
## Login to EA Sports. May require two-factor authentication. You will be prompted for code, which is likely in email inbox.
def login():
global fut
print('Email: ')
email = raw_input()
print('Password: ')
password = raw_input()
print('Secret: ')
secret = raw_input()
print('platform: [pc/ps3/ps4/xbox/xbox360] ')
platform = raw_input()
print('Loading...')
fut = fut.Core(email, password, secret, platform)
print('You have logged in successfully.')
# Keepalive
## Run this every ~10 mins so the program continues to run
def keepalive():
global coins
coins = fut.keepalive()
return(coins)
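# Illustrative sketch (not part of the original docs): keepalive() can be polled in
# a loop to keep the EA session alive while trading.
#
#     while True:
#         print('Current coins: %s' % keepalive())
#         sleep(600)   # roughly every 10 minutes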
# Sold
## Clean up tradepile of those who sold
def sold():
tradepile = fut.tradepile()
sold = []
bids = []
for i in range(0, len(tradepile)):
if tradepile[i]['tradeState'] == 'closed':
sold.append(fut.players[tradepile[i]['assetId']]['lastname'])
bids.append(tradepile[i]['currentBid'])
print('Sold %s %s for %s coins' % (fut.players[tradepile[i]['assetId']]['firstname'], fut.players[tradepile[i]['assetId']]['lastname'], tradepile[i]['currentBid']))
fut.tradepileDelete(tradepile[i]['tradeId'])
return('Sold %s players for %s coins' % (len(sold), sum(bids)))
# Not Sold
## Clean up tradepile of those that did not sell
def not_sold():
tradepile = fut.tradepile()
for i in range(0, len(tradepile)):
if (tradepile[i]['tradeState'] == 'expired') or (tradepile[i]['tradeState'] == None):
print('Did not sell %s %s. Moved back to team.' % (fut.players[tradepile[i]['assetId']]['firstname'], fut.players[tradepile[i]['assetId']]['lastname']))
fut.sendToClub(tradepile[i]['id'])
# Active
## Gets active trades in tradepile
def active():
tradepile = fut.tradepile()
global active_players
active_players = []
for i in range(0, len(tradepile)):
if (tradepile[i]['tradeState'] == 'active'):
active_players.append(tradepile[i]['assetId'])
print("""Actively selling %s %s. Expires in %s minutes. %s bids so far and a current price of %s.""" %
(fut.players[tradepile[i]['assetId']]['firstname'], fut.players[tradepile[i]['assetId']]['lastname'],
int(round(tradepile[i]['expires']/60)), tradepile[i]['offers'], tradepile[i]['currentBid']))
# My Team
## Get names and attributes of team members, including last sale price
def my_team():
sold()
not_sold()
myclub = fut.club()
my_auction = pd.DataFrame(myclub)
my_auction = my_auction[my_auction['untradeable'] == False]
assetIds = my_auction['assetId'].tolist()
ids = my_auction['id'].tolist()
firstnames = []
lastnames = []
for i in assetIds:
firstnames.append(fut.players[i]['firstname'])
lastnames.append(fut.players[i]['lastname'])
players = [i + ' ' + j for i, j in zip(firstnames, lastnames)]
position = my_auction['position'].tolist()
rating = my_auction['rating'].tolist()
contract = my_auction['contract'].tolist()
lastSalePrice = my_auction['lastSalePrice'].tolist()
discardValue = my_auction['discardValue'].tolist()
my_values = [max(lastSalePrice, discardValue) for lastSalePrice, discardValue in zip(lastSalePrice, discardValue)]
global team
team = pd.DataFrame(
{'assetId': assetIds,
'id': ids,
'name': players,
'position': position,
'rating': rating,
'contract': contract,
'my_value': my_values
}
)
return(team)
# Min value function
def mins(items, n):
mins = items[:n]
mins.sort()
for i in items[n:]:
if i < mins[-1]:
mins.append(i)
mins.sort()
    mins = mins[:n]
return(mins)
# Median value function
def median(lst):
n = len(lst)
if n < 1:
return None
if n % 2 == 1:
return sorted(lst)[n//2]
else:
return sum(sorted(lst)[n//2-1:n//2+1])/2.0
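# Worked examples for the two helpers above (illustrative):
#     mins([5, 1, 4, 2, 3], 3)  -> [1, 2, 3]    (the three smallest values)
#     median([1, 2, 3, 4])      -> 2.5          (even-length lists average the middle pair)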
# My Market
## Get market for my tradeable players. Constrain by page depth (time) and strategy option.
def my_market(depth=1, strategy=1):
# See if team exists yet as a variable
try:
team
except NameError:
my_team()
else:
mkt_value = []
# Loop through each team member to get market values
for i in range(0, len(team)):
print('Getting market value for: %s' % (team['name'][i]))
mkt_values = []
for page in range(0, depth):
for d in fut.search(ctype='player', assetId=str(team['assetId'][i]), page_size='50', start=page):
mkt_values.append({'buy': d['buyNowPrice'], 'tradeId': d['tradeId']})
            if strategy == 1:
                # Gets median of the 5 lowest buy-now prices seen on the market
                buy_prices = [d['buy'] for d in mkt_values]
                mkt_value.append({'buy': median(mins(buy_prices, 5))})
                print('Checked %s players. Market value of %s coins added for %s' % (depth*50, mkt_value[i]['buy'], team['name'][i]))
            if strategy == 2:
                # New strategy here
                ###
                print('Strategy 2 is not implemented yet; no market value added for %s' % (team['name'][i]))
            if strategy == 3:
                # New strategy here
                ###
                print('Strategy 3 is not implemented yet; no market value added for %s' % (team['name'][i]))
# Create a dataframe of market values that merges with team members
mkt_value = pd.Series(mkt_value).values
sell = []
for i in mkt_value:
sell.append(i['buy'])
market = pd.DataFrame(
{'mkt_value': sell}
)
global team_market
team_market = pd.merge(team, market, left_index=True, right_index=True)
# List
## Put players on the block, send to tradepile and sell
def list_players(min_value=300, strategy=1):
my_market()
print('Cleaning up tradepile...')
sold()
not_sold()
active()
tradepile = fut.tradepile()
# Get players on the block
global block
block = team_market[team_market['my_value']>min_value]
print('%s players on the block with a value to you of %s coins and a market value of %s coins' % (len(block), block['my_value'].sum(), block['mkt_value'].sum()))
global quicksell
quicksell = team_market[team_market['my_value']<=min_value]
if len(quicksell) == 0:
print('No items to quicksell.')
else:
for index, row in quicksell.iterrows():
fut.quickSell(row['id'])
print('Quick sold %s items for %s coins' % (len(quicksell), quicksell['my_value'].sum()))
# Get available space and send players from block to tradepile
available_space = fut.pileSize().get('tradepile') - len(tradepile)
block = block.head(n=available_space)
for index, row in block.iterrows():
if row['assetId'] in active_players:
block.drop(index, inplace=True)
if len(block) == 0:
print('No players to list on market.')
else:
print('%s players can be added to tradepile.' % (len(block)))
for index, row in block.iterrows():
fut.sendToTradepile(row['id'])
sleep(2)
print('%s added to tradepile.' % (row['name']))
print('%s players successfully added to tradepile.' % (len(block)))
# List players on market
# Strategy 1: List at my value, buy now at market value.
if strategy==1:
for index, row in block.iterrows():
if row['mkt_value'] > row['my_value']:
fut.sell(item_id = row['id'], bid = row['my_value'], buy_now = row['mkt_value'], duration = 3600)
print('%s has been listed on the market for %s coins to buy now and %s coins starting bid' % (row['name'], row['mkt_value'], row['my_value']))
if row['mkt_value'] <= row['my_value']:
print('The market value for %s is below or equal to what you originally paid. Moving this card back to team for now.' % (row['name']))
fut.sendToClub(row['id'])
sleep(10)
print('All tradeable players have been listed on the market. Check back in an hour to see if they sold')
| gpl-3.0 |
mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part05-e08_bicycle_timeseries/test/test_bicycle_timeseries.py | 1 | 2279 | #!/usr/bin/env python3
import unittest
from unittest.mock import patch, MagicMock
import pandas as pd
import numpy as np
from tmc import points
from tmc.utils import load, get_out, patch_helper
module_name="src.bicycle_timeseries"
bicycle_timeseries = load(module_name, "bicycle_timeseries")
main = load(module_name, "main")
ph = patch_helper(module_name)
@points('p05-08.1')
class BicycleTimeseries(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# cls.df = bicycle_timeseries()
def setUp(self):
self.df = bicycle_timeseries()
def test_shape(self):
self.assertEqual(self.df.shape, (37128, 20), msg="Incorrect shape!")
def test_columns(self):
cols = ['Auroransilta', 'Eteläesplanadi', 'Huopalahti (asema)',
'Kaisaniemi/Eläintarhanlahti', 'Kaivokatu', 'Kulosaaren silta et.',
'Kulosaaren silta po. ', 'Kuusisaarentie', 'Käpylä, Pohjoisbaana',
'Lauttasaaren silta eteläpuoli', 'Merikannontie',
'Munkkiniemen silta eteläpuoli', 'Munkkiniemi silta pohjoispuoli',
'Heperian puisto/Ooppera', 'Pitkäsilta itäpuoli',
'Pitkäsilta länsipuoli', 'Lauttasaaren silta pohjoispuoli',
'Ratapihantie', 'Viikintie', 'Baana']
np.testing.assert_array_equal(self.df.columns, cols, err_msg="Incorrect columns!")
def test_index(self):
self.assertIsInstance(self.df.index[0], pd.Timestamp,
msg="Expected index to have type timestamp!")
self.assertEqual(self.df.index[0], pd.to_datetime("2014-1-1 00:00"),
msg="Incorrect first index!")
self.assertEqual(self.df.index[1], pd.to_datetime("2014-1-1 01:00"),
msg="Incorrect second index!")
def test_calls(self):
with patch(ph("bicycle_timeseries"), wraps=bicycle_timeseries) as pbts,\
patch(ph("pd.read_csv"), wraps=pd.read_csv) as prc,\
patch(ph("pd.to_datetime"), wraps=pd.to_datetime) as pdatetime:
main()
pbts.assert_called_once()
prc.assert_called_once()
pdatetime.assert_called()
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
Nyker510/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
TimoRoth/oggm | oggm/tests/conftest.py | 1 | 9836 | """Pytest fixtures to be used in other test modules"""
import os
import shutil
import logging
import getpass
from functools import wraps
import numpy as np
import pytest
import shapely.geometry as shpg
import matplotlib.pyplot as plt
from oggm.shop import cru, histalp, ecmwf
from oggm import cfg, tasks
from oggm.core import flowline
from oggm.tests.funcs import init_hef, get_test_dir
from oggm import utils
from oggm.utils import mkdir, _downloads
from oggm.utils import oggm_urlretrieve
from oggm.tests import HAS_MPL_FOR_TESTS, HAS_INTERNET
logger = logging.getLogger(__name__)
def pytest_configure(config):
for marker in ["slow", "download", "creds", "internet", "test_env",
"graphic "]:
config.addinivalue_line("markers", marker)
if config.pluginmanager.hasplugin('xdist'):
try:
from ilock import ILock
utils.lock = ILock("oggm_xdist_download_lock_" + getpass.getuser())
logger.info("ilock locking setup successfully for xdist tests")
except BaseException:
logger.warning("could not setup ilock locking for distributed "
"tests")
def pytest_addoption(parser):
parser.addoption("--run-slow", action="store_true", default=False,
help="Run slow tests")
parser.addoption("--run-download", action="store_true", default=False,
help="Run download tests")
parser.addoption("--run-creds", action="store_true", default=False,
help="Run download tests requiring credentials")
parser.addoption("--run-test-env", metavar="ENVNAME", default="",
help="Run only specified test env")
parser.addoption("--no-run-internet", action="store_true", default=False,
help="Don't run any tests accessing the internet")
def pytest_collection_modifyitems(config, items):
use_internet = HAS_INTERNET and not config.getoption("--no-run-internet")
skip_slow = not config.getoption("--run-slow")
skip_download = not use_internet or not config.getoption("--run-download")
skip_cred = skip_download or not config.getoption("--run-creds")
run_test_env = config.getoption("--run-test-env")
slow_marker = pytest.mark.skip(reason="need --run-slow option to run")
download_marker = pytest.mark.skip(reason="need --run-download option to "
"run, internet access is "
"required")
cred_marker = pytest.mark.skip(reason="need --run-creds option to run, "
"internet access is required")
internet_marker = pytest.mark.skip(reason="internet access is required")
test_env_marker = pytest.mark.skip(reason="only test_env=%s tests are run"
% run_test_env)
graphic_marker = pytest.mark.skip(reason="requires mpl V1.5+ and "
"pytest-mpl")
for item in items:
if skip_slow and "slow" in item.keywords:
item.add_marker(slow_marker)
if skip_download and "download" in item.keywords:
item.add_marker(download_marker)
if skip_cred and "creds" in item.keywords:
item.add_marker(cred_marker)
if not use_internet and "internet" in item.keywords:
item.add_marker(internet_marker)
if run_test_env:
test_env = item.get_closest_marker("test_env")
if not test_env or test_env.args[0] != run_test_env:
item.add_marker(test_env_marker)
if "graphic" in item.keywords:
def wrap_graphic_test(test):
@wraps(test)
def test_wrapper(*args, **kwargs):
try:
return test(*args, **kwargs)
finally:
plt.close()
return test_wrapper
item.obj = wrap_graphic_test(item.obj)
if not HAS_MPL_FOR_TESTS:
item.add_marker(graphic_marker)
@pytest.fixture(autouse=True)
def patch_data_urls(monkeypatch):
"""This makes sure we never download the big files with our tests"""
url = 'https://cluster.klima.uni-bremen.de/~oggm/test_climate/'
monkeypatch.setattr(cru, 'CRU_SERVER', url + 'cru/')
monkeypatch.setattr(cru, 'CRU_BASE', 'cru_ts3.23.1901.2014.{}.dat.nc')
monkeypatch.setattr(histalp, 'HISTALP_SERVER', url + 'histalp/')
monkeypatch.setattr(ecmwf, 'ECMWF_SERVER', url)
basenames = {
'ERA5': {
'inv': 'era5/monthly/v1.0/era5_invariant.nc',
'pre': 'era5/monthly/v1.0/era5_monthly_prcp_1979-2018.nc',
'tmp': 'era5/monthly/v1.0/era5_monthly_t2m_1979-2018.nc'
},
'ERA5L': {
'inv': 'era5-land/monthly/v1.0/era5_land_invariant_flat.nc',
'pre': 'era5-land/monthly/v1.0/era5_land_monthly_prcp_1981-2018_flat'
'.nc',
'tmp': 'era5-land/monthly/v1.0/era5_land_monthly_t2m_1981-2018_flat.nc'
},
'CERA': {
'inv': 'cera-20c/monthly/v1.0/cera-20c_invariant.nc',
'pre': 'cera-20c/monthly/v1.0/cera-20c_pcp_1901-2010.nc',
'tmp': 'cera-20c/monthly/v1.0/cera-20c_t2m_1901-2010.nc'
},
'ERA5dr': {
'inv': 'era5/monthly/vdr/ERA5_geopotential_monthly.nc',
'lapserates': 'era5/monthly/vdr/ERA5_lapserates_monthly.nc',
'tmp': 'era5/monthly/vdr/ERA5_temp_monthly.nc',
'tempstd': 'era5/monthly/vdr/ERA5_tempstd_monthly.nc',
'pre': 'era5/monthly/vdr/ERA5_totalprecip_monthly.nc',
}
}
monkeypatch.setattr(ecmwf, 'BASENAMES', basenames)
def secure_url_retrieve(url, *args, **kwargs):
"""A simple patch to OGGM's download function to make sure we don't
download elsewhere than expected."""
assert ('github' in url or
'cluster.klima.uni-bremen.de/~oggm/ref_mb_params' in url or
'cluster.klima.uni-bremen.de/~oggm/test_gdirs/' in url or
'cluster.klima.uni-bremen.de/~oggm/demo_gdirs/' in url or
'cluster.klima.uni-bremen.de/~oggm/test_climate/' in url or
'klima.uni-bremen.de/~oggm/climate/cru/cru_cl2.nc.zip' in url
)
return oggm_urlretrieve(url, *args, **kwargs)
@pytest.fixture(autouse=True)
def patch_url_retrieve(monkeypatch):
monkeypatch.setattr(_downloads, 'oggm_urlretrieve', secure_url_retrieve)
@pytest.fixture()
def dummy_constant_bed():
dx = 1.
hmax = 3000.
hmin = 1000.
nx = 200
map_dx = 100.
widths = 3.
surface_h = np.linspace(hmax, hmin, nx)
bed_h = surface_h
widths = surface_h * 0. + widths
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
return [flowline.RectangularBedFlowline(line, dx, map_dx, surface_h,
bed_h, widths)]
@pytest.fixture(scope='session')
def test_dir():
""" Provides a reference to the test directory for the entire test session.
Named after the current git revision.
As a session-scoped fixture, this will only be created once and
then injected to each test that depends on it.
"""
return get_test_dir()
def _setup_case_dir(call, test_dir):
casedir = os.path.join(test_dir, call.__name__)
mkdir(casedir, reset=True)
return casedir
def _teardown_case_dir(casedir):
if os.path.exists(casedir):
shutil.rmtree(casedir)
@pytest.fixture(scope='function')
def case_dir(request, test_dir):
""" Provides a unique directory for the current test function, a child of
the session test directory (test_dir > case_dir). Named after the
current test function.
As a function-scoped fixture, a new directory is created for
each function that uses this and then cleaned up when the case
completes.
"""
cd = _setup_case_dir(request.function, test_dir)
yield cd
_teardown_case_dir(cd)
@pytest.fixture(scope='class')
def class_case_dir(request, test_dir):
""" Provides a unique directory for the current test class, a child of
the session test directory (test_dir > class_case_dir). Named after
the current test class.
As a class-scoped fixture, a class directory is created once for
the current class and used by each test inside it. It is cleaned
up when the all the cases in the class complete.
"""
cd = _setup_case_dir(request.cls, test_dir)
yield cd
_teardown_case_dir(cd)
@pytest.fixture(scope='module')
def hef_gdir_base(request, test_dir):
""" Provides an initialized Hintereisferner glacier directory.
As a module fixture, the initialization is run only once per test
module that uses it.
IMPORTANT: To preserve a constant starting condition, hef_gdir_base
should almost never be directly injected into a test case. Test cases
should use the below hef_gdir fixture to provide a directory that has
been copied into an ephemeral case directory.
"""
try:
module = request.module
border = module.DOM_BORDER if module.DOM_BORDER is not None else 40
return init_hef(border=border)
except AttributeError:
return init_hef()
@pytest.fixture(scope='class')
def hef_gdir(hef_gdir_base, class_case_dir):
""" Provides a copy of the base Hintereisenferner glacier directory in
a case directory specific to the current test class. All cases in
the test class will use the same copy of this glacier directory.
"""
return tasks.copy_to_basedir(hef_gdir_base, base_dir=class_case_dir,
setup='all')
| bsd-3-clause |
mattjj/pyhsmm-slds | examples/demo.py | 2 | 2399 | from __future__ import division
import numpy as np
np.random.seed(0)
import matplotlib
# matplotlib.use("macosx") # might be necessary for animation to work
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import autoregressive
from pyhsmm.basic.distributions import PoissonDuration
from pybasicbayes.distributions import AutoRegression
from pyslds.models import DefaultSLDS
###################
# generate data #
###################
As = [np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
for alpha, theta in ((0.95,0.1), (0.95,-0.1), (1., 0.))]
truemodel = autoregressive.models.ARHSMM(
alpha=4., init_state_concentration=4.,
obs_distns=[AutoRegression(A=A, sigma=0.05*np.eye(2)) for A in As],
dur_distns=[PoissonDuration(alpha_0=5*50, beta_0=5) for _ in As])
truemodel.prefix = np.array([[0.,3.]])
data, labels = truemodel.generate(1000)
data = data[truemodel.nlags:]
plt.figure()
plt.plot(data[:,0],data[:,1],'x-')
plt.xlabel("$y_1$")
plt.ylabel("$y_2$")
#################
# build model #
#################
Kmax = 10 # number of latent discrete states
D_latent = 2 # latent linear dynamics' dimension
D_obs = 2 # data dimension
Cs = np.eye(D_obs) # Shared emission matrix
sigma_obss = 0.05 * np.eye(D_obs) # Emission noise covariance
model = DefaultSLDS(
K=Kmax, D_obs=D_obs, D_latent=D_latent,
Cs=Cs, sigma_obss=sigma_obss)
model.add_data(data)
model.resample_states()
##################
# run sampling #
##################
n_show = 50
samples = np.empty((n_show, data.shape[0]))
samples[:n_show] = model.stateseqs[0]
fig = plt.figure(figsize=(8,3))
gs = gridspec.GridSpec(6,1)
ax1 = fig.add_subplot(gs[:-1])
ax2 = fig.add_subplot(gs[-1], sharex=ax1)
im = ax1.matshow(samples[::-1], aspect='auto')
ax1.autoscale(False)
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_ylabel("Discrete State")
xo, yo, w, ht = ax1.bbox.bounds
h = ht / n_show
ax2.matshow(labels[None,:], aspect='auto')
ax2.set_xticks([])
ax2.set_xlabel("Time")
ax2.set_yticks([])
plt.draw()
plt.ion()
plt.show()
print("Press Ctrl-C to stop...")
from itertools import count
for itr in count():
model.resample_model()
samples[itr % n_show] = model.stateseqs[0]
im.set_array(samples[::-1])
plt.pause(0.001)
| mit |
fegonda/icon_demo | code/model/unet/g.py | 1 | 44565 | import os
import sys
import skimage.transform
import skimage.exposure
import time
import glob
import numpy as np
import mahotas
import random
import matplotlib
import matplotlib.pyplot as plt
import scipy
import scipy.ndimage
import json
from scipy.ndimage.filters import maximum_filter
base_path = os.path.dirname(__file__)
sys.path.insert(1,os.path.join(base_path, '../../common'))
sys.path.insert(2,os.path.join(base_path, '../../database'))
from utility import Utility
from settings import Paths
from project import Project
from paths import Paths
from db import DB
# The idea is to grow the labels to cover the whole membrane.
# Both the image and the label are expected to be scaled to [0, 1].
def adjust_imprecise_boundaries(image, label, number_iterations=5):
label = label.copy()
label_orig = label.copy()
for i in xrange(number_iterations):
# grow labels by one pixel
label = maximum_filter(label, 2)
# only keep pixels that are on dark membrane
non_valid_label = np.logical_and(label==1, image>0.7)
label[non_valid_label] = 0
# make sure original labels are preserved
label = np.logical_or(label==1, label_orig==1)
return label
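# Illustrative usage sketch (variable and file names are made up; both inputs are
# expected to be scaled to [0, 1] as noted above):
#
#     img_norm = normalizeImage(mahotas.imread('image.tif'))
#     grown = adjust_imprecise_boundaries(img_norm, label_img > 0, number_iterations=5)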
def deform_image(image):
# assumes image is uint8
def apply_deformation(image, coordinates):
# ndimage expects uint8 otherwise introduces artifacts. Don't ask me why, its stupid.
deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect')
deformed = np.reshape(deformed, image.shape)
return deformed
displacement_x = np.random.normal(size=image.shape, scale=10)
displacement_y = np.random.normal(size=image.shape, scale=10)
# smooth over image
coords_x, coords_y = np.meshgrid(np.arange(0,image.shape[0]), np.arange(0,image.shape[1]), indexing='ij')
displacement_x = coords_x.flatten() #+ scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten()
displacement_y = coords_y.flatten() #+ scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten()
coordinates = np.vstack([displacement_x, displacement_y])
return apply_deformation(np.uint8(image*255), coordinates)
def deform_images(image1, image2, image3=None):
# assumes image is uint8
def apply_deformation(image, coordinates):
# ndimage expects uint8 otherwise introduces artifacts. Don't ask me why, its stupid.
deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect')
deformed = np.reshape(deformed, image.shape)
return deformed
displacement_x = np.random.normal(size=image1.shape, scale=10)
displacement_y = np.random.normal(size=image1.shape, scale=10)
# smooth over image
coords_x, coords_y = np.meshgrid(np.arange(0,image1.shape[0]), np.arange(0,image1.shape[1]), indexing='ij')
displacement_x = coords_x.flatten() #+ scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten()
displacement_y = coords_y.flatten() #+ scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten()
coordinates = np.vstack([displacement_x, displacement_y])
deformed1 = apply_deformation(np.uint8(image1*255), coordinates)
deformed2 = apply_deformation(np.uint8(image2*255), coordinates)
if not image3 is None:
deformed3 = apply_deformation(image3, coordinates)
return (deformed1, deformed2, deformed3)
return (deformed1, deformed2)
def deform_images_list(images):
# assumes image is uint8
def apply_deformation(image, coordinates):
# ndimage expects uint8 otherwise introduces artifacts. Don't ask me why, its stupid.
deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect')
deformed = np.reshape(deformed, image.shape)
return deformed
displacement_x = np.random.normal(size=images.shape[:2], scale=10)
displacement_y = np.random.normal(size=images.shape[:2], scale=10)
# smooth over image
coords_x, coords_y = np.meshgrid(np.arange(0,images.shape[0]), np.arange(0,images.shape[1]), indexing='ij')
displacement_x = coords_x.flatten() #+ scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten()
displacement_y = coords_y.flatten() #+ scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten()
coordinates = np.vstack([displacement_x, displacement_y])
deformed = images.copy()
for i in xrange(images.shape[2]):
deformed[:,:,i] = apply_deformation(np.uint8(images[:,:,i]), coordinates)
return deformed
def normalizeImage(img, saturation_level=0.05, doClahe=False): #was 0.005
if not doClahe:
sortedValues = np.sort( img.ravel())
minVal = np.float32(sortedValues[np.int(len(sortedValues) * (saturation_level / 2))])
maxVal = np.float32(sortedValues[np.int(len(sortedValues) * (1 - saturation_level / 2))])
normImg = np.float32(img - minVal) * (255 / (maxVal-minVal))
normImg[normImg<0] = 0
normImg[normImg>255] = 255
output = (np.float32(normImg) / 255.0)
return output
else:
output = skimage.exposure.equalize_adapthist(img)
return output
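# Usage sketch for normalizeImage (hypothetical input; `raw` stands for any 2-D grayscale
# array). With doClahe=False the intensities are clipped to the 2.5th-97.5th percentile
# range (saturation_level=0.05) and rescaled to floats in [0, 1]; with doClahe=True
# skimage's adaptive histogram equalization is used instead:
#
#   raw = mahotas.imread('some_section.tif')        # hypothetical file name
#   norm = normalizeImage(raw)
#   norm_clahe = normalizeImage(raw, doClahe=True)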
def generate_experiment_data_supervised(purpose='train', nsamples=1000, patchSize=29, balanceRate=0.5, rng=np.random):
start_time = time.time()
data_path = '/n/home00/fgonda/icon/data/reference'
#if os.path.exists('/media/vkaynig/Data1/Cmor_paper_data/'):
if os.path.exists( data_path ):
pathPrefix = data_path
#pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/'
else:
pathPrefix = '/n/pfister_lab/vkaynig/'
#img_search_string_membraneImages = pathPrefix + 'labels/membranes_nonDilate/' + purpose + '/*.tif'
#img_search_string_backgroundMaskImages = pathPrefix + 'labels/background_nonDilate/' + purpose + '/*.tif'
img_search_string_membraneImages = pathPrefix + 'labels/membranes/' + purpose + '/*.tif'
img_search_string_backgroundMaskImages = pathPrefix + 'labels/background/' + purpose + '/*.tif'
img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif'
img_files_gray = sorted( glob.glob( img_search_string_grayImages ) )
img_files_label = sorted( glob.glob( img_search_string_membraneImages ) )
img_files_backgroundMask = sorted( glob.glob( img_search_string_backgroundMaskImages ) )
whole_set_patches = np.zeros((nsamples, patchSize*patchSize), dtype=np.float)
whole_set_labels = np.zeros(nsamples, dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil(
(nsamples) / np.float(np.shape(img_files_gray)[0])
))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
img = mahotas.imread(img_files_gray[0])
grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
for img_index in xrange(np.shape(img_files_gray)[0]):
img = mahotas.imread(img_files_gray[img_index])
img = normalizeImage(img)
grayImages[:,:,img_index] = img
label_img = mahotas.imread(img_files_label[img_index])
labelImages[:,:,img_index] = label_img
mask_img = mahotas.imread(img_files_backgroundMask[img_index])
maskImages[:,:,img_index] = mask_img
for img_index in xrange(np.shape(img_files_gray)[0]):
img = grayImages[:,:,img_index]
label_img = labelImages[:,:,img_index]
mask_img = maskImages[:,:,img_index]
#get rid of invalid image borders
border_patch = np.int(np.ceil(patchSize/2.0))
border = np.int(np.ceil(np.sqrt(2*(border_patch**2))))
label_img[:border,:] = 0 #top
label_img[-border:,:] = 0 #bottom
label_img[:,:border] = 0 #left
label_img[:,-border:] = 0 #right
mask_img[:border,:] = 0
mask_img[-border:,:] = 0
mask_img[:,:border] = 0
mask_img[:,-border:] = 0
membrane_indices = np.nonzero(label_img)
non_membrane_indices = np.nonzero(mask_img)
positiveSample = True
for i in xrange(nsamples_perImage):
if counter >= nsamples:
break
if positiveSample:
randmem = random.choice(xrange(len(membrane_indices[0])))
(row,col) = (membrane_indices[0][randmem],
membrane_indices[1][randmem])
label = 1.0
positiveSample = False
else:
randmem = random.choice(xrange(len(non_membrane_indices[0])))
(row,col) = (non_membrane_indices[0][randmem],
non_membrane_indices[1][randmem])
label = 0.0
positiveSample = True
imgPatch = img[row-border+1:row+border, col-border+1:col+border]
imgPatch = skimage.transform.rotate(imgPatch, random.choice(xrange(360)))
imgPatch = imgPatch[border-border_patch:border+border_patch-1,border-border_patch:border+border_patch-1]
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
imgPatch = np.rot90(imgPatch, random.randint(0,3))
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = label
counter += 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
#remove the sorting in image order
shuffleIndex = rng.permutation(np.shape(labels)[0])
for i in xrange(np.shape(labels)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i] = labels[shuffleIndex[i]]
data_set = (whole_data, whole_set_labels)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ' + '%.2fm' % (total_time / 60.)
rval = data_set
return rval
def generate_image_data(img, patchSize=29, rows=1):
img = normalizeImage(img)
# pad image borders
border = np.int(np.ceil(patchSize/2.0))
img_padded = np.pad(img, border, mode='reflect')
whole_set_patches = np.zeros((len(rows)*img.shape[1], patchSize**2))
counter = 0
for row in rows:
for col in xrange(img.shape[1]):
imgPatch = img_padded[row+1:row+2*border, col+1:col+2*border]
whole_set_patches[counter,:] = imgPatch.flatten()
counter += 1
#normalize data
whole_set_patches = np.float32(whole_set_patches)
whole_set_patches = whole_set_patches - 0.5
return whole_set_patches
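# Usage sketch for generate_image_data (hypothetical image). The image is reflect-padded
# by ceil(patchSize/2) and one flattened patch is returned per pixel of the requested
# rows, ready to feed to a per-pixel classifier. Note that `rows` must be an iterable of
# row indices:
#
#   img = mahotas.imread('some_section.tif')        # hypothetical file name
#   patches = generate_image_data(img, patchSize=29, rows=[100, 101])
#   # patches.shape == (2 * img.shape[1], 29 * 29), values shifted to be centered at 0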
def stupid_map_wrapper(parameters):
f = parameters[0]
args = parameters[1:]
return f(*args)
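# Usage sketch for stupid_map_wrapper (hypothetical job list): multiprocessing.Pool.map
# only accepts single-argument callables, so (function, arg1, arg2, ...) tuples are
# unpacked here:
#
#   from multiprocessing import Pool
#   pool = Pool(4)
#   jobs = [(normalizeImage, img) for img in image_list]   # image_list is hypothetical
#   results = pool.map(stupid_map_wrapper, jobs)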
def get_sample_sizes(annotations):
samples_sizes = []
n_labels = len(annotations)
for coordinates in annotations:
n_label_samples_size = len(coordinates)/2
samples_sizes.append( n_label_samples_size )
return samples_sizes
def gen_membrane_image(annotations, dim):
m_image = np.zeros( (dim[0], dim[1]) )
label = 1
coordinates = annotations[ label ]
n_coordinates = len(coordinates)
i = 0
while i < n_coordinates:
x = coordinates[i]
y = coordinates[i+1]
m_image[x][y] = 1.0
i = i+2
return m_image
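# Usage sketch for gen_membrane_image (hypothetical annotations). Annotations are
# per-label lists of interleaved coordinates, and only label 1 (presumably the membrane
# label) is rasterized into a binary image of the requested shape:
#
#   annotations = [[], [10, 12, 11, 12, 12, 12]]   # label 1: points (10,12), (11,12), (12,12)
#   membrane = gen_membrane_image(annotations, (64, 64))
#   # membrane is a (64, 64) array with 1.0 at the three annotated pixels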
def generate_experiment_data_patch_prediction(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1, project=None):
nr_layers=3
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
start_time = time.time()
if purpose == 'train':
images = DB.getTrainingImages( project.id, new=False )
path = Paths.TrainGrayscale
else:
images = DB.getImages( project.id, purpose=1, new=False, annotated=True )
path = Paths.ValidGrayscale
files_gray = []
data_labels = []
label_sample_sizes = np.array([ 0, 0])
#imgs = DB.getImages( project.id )
for image in images:
d_path = '%s/%s.tif'%(path, image.id)
l_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id)
if os.path.exists( d_path ) and os.path.exists( l_path ):
# load the annotations
with open( l_path ) as labels_f:
annotations = json.load( labels_f )
# skip if not enough samples in the annotations
sample_sizes = get_sample_sizes( annotations )
if np.sum( sample_sizes ) == 0:
continue
label_sample_sizes = label_sample_sizes + np.array(sample_sizes)
files_gray.append( d_path )
data_labels.append( annotations )
if len( files_gray ) == 0 or len( data_labels ) == 0 or np.min( label_sample_sizes ) == 0:
return None
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(files_gray)[0]) ))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
border_patch = np.ceil(patchSize/2.0)
pad = patchSize
read_order = np.random.permutation(np.shape(files_gray)[0])
for index in read_order:
file_image = files_gray[index]
labels = data_labels[index]
sample_sizes = get_sample_sizes( labels )
img = mahotas.imread(files_gray[index])
img = np.pad(img, ((pad, pad), (pad, pad)), 'symmetric')
# normalizes [0,1]
img = normalizeImage(img, doClahe=True)
membrane_img = gen_membrane_image( labels, img.shape )
#img_cs = int(np.floor(nr_layers/2))
#if purpose=='train':
# # adjust according to middle image
# membrane_img = adjust_imprecise_boundaries(img[:,:,img_cs], membrane_img, 0)
#get rid of invalid image borders
#membrane_img[:,-patchSize:] = 0
#membrane_img[-patchSize:,:] = 0
valid_indices = np.nonzero(membrane_img)
print valid_indices
if len(valid_indices[0]) == 0 or len(valid_indices[1]) == 0:
continue
for i in xrange(nsamples_perImage):
if counter >= nsamples:
break
randmem = random.choice(xrange(len(valid_indices[0])))
(row,col) = (valid_indices[0][randmem],
valid_indices[1][randmem])
imgPatch = img[row:row+patchSize, col:col+patchSize]
memPatch = membrane_img[row:row+patchSize, col:col+patchSize]
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
memPatch = np.fliplr(memPatch)
rotateInt = random.randint(0,3)
imgPatch = np.rot90(imgPatch, rotateInt)
memPatch = np.rot90(memPatch, rotateInt)
#imgPatch = deform_image(imgPatch)
imgPatch, memPatch = deform_images( imgPatch, memPatch )
imgPatch = imgPatch / np.double(np.max(imgPatch))
memPatch = memPatch / np.double(np.max(memPatch))
# crop labelPatch to potentially smaller output size
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
memPatch = memPatch[offset_small_patch:offset_small_patch+outPatchSize,offset_small_patch:offset_small_patch+outPatchSize]
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = memPatch.flatten()
counter = counter + 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(labels)[0])
for i in xrange(np.shape(labels)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
data_set = (whole_data, whole_set_labels)
print np.min(whole_data), np.max(whole_data)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
def agenerate_experiment_data_patch_prediction(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1, project=None):
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
start_time = time.time()
if purpose == 'train':
images = DB.getTrainingImages( project.id, new=False )
path = Paths.TrainGrayscale
else:
images = DB.getImages( project.id, purpose=1, new=False, annotated=True )
path = Paths.ValidGrayscale
files_gray = []
data_labels = []
label_sample_sizes = np.array([ 0, 0])
#imgs = DB.getImages( project.id )
for image in images:
d_path = '%s/%s.tif'%(path, image.id)
l_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id)
if os.path.exists( d_path ) and os.path.exists( l_path ):
# load the annotations
with open( l_path ) as labels_f:
annotations = json.load( labels_f )
# skip if not enough samples in the annotations
sample_sizes = get_sample_sizes( annotations )
if np.sum( sample_sizes ) == 0:
continue
label_sample_sizes = label_sample_sizes + np.array(sample_sizes)
files_gray.append( d_path )
data_labels.append( annotations )
print len(files_gray)
print len(data_labels)
print label_sample_sizes
if len( files_gray ) == 0 or len( data_labels ) == 0 or np.min( label_sample_sizes ) == 0:
return None
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(files_gray)[0]) ))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
border_patch = np.ceil(patchSize/2.0)
pad = patchSize
read_order = np.random.permutation(np.shape(files_gray)[0])
for index in read_order:
file_image = files_gray[index]
labels = data_labels[index]
sample_sizes = get_sample_sizes( labels )
img = mahotas.imread(files_gray[index])
img = np.pad(img, ((pad, pad), (pad, pad)), 'symmetric')
# normalizes [0,1]
img = normalizeImage(img, doClahe=True)
membrane_img = gen_membrane_image( labels, img.shape )
print membrane_img.shape
print np.unique(membrane_img)
for label, coordinates in enumerate( labels ):
if counter >= nsamples:
break
ncoordinates = len(coordinates)
if ncoordinates == 0:
continue
# randomly sample from the label
indices = np.random.choice( ncoordinates, sample_sizes[label], replace=False)
for i in indices:
if i%2 == 1:
i = i-1
if counter >= nsamples:
break
col = coordinates[i]
row = coordinates[i+1]
r1 = int(row+patchSize-border_patch)
r2 = int(row+patchSize+border_patch)
c1 = int(col+patchSize-border_patch)
c2 = int(col+patchSize+border_patch)
imgPatch = img[r1:r2,c1:c2]
memPatch = membrane_img[r1:r2,c1:c2]
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
memPatch = np.fliplr(memPatch)
rotateInt = random.randint(0,3)
imgPatch = np.rot90(imgPatch, rotateInt)
memPatch = np.rot90(memPatch, rotateInt)
#imgPatch = deform_image(imgPatch)
imgPatch, memPatch = deform_images( imgPatch, memPatch )
imgPatch = imgPatch / np.double(np.max(imgPatch))
memPatch = memPatch / np.double(np.max(memPatch))
# crop labelPatch to potentially smaller output size
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
memPatch = memPatch[offset_small_patch:offset_small_patch+outPatchSize,offset_small_patch:offset_small_patch+outPatchSize]
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = memPatch.flatten()
counter = counter + 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(labels)[0])
for i in xrange(np.shape(labels)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
data_set = (whole_data, whole_set_labels)
print np.min(whole_data), np.max(whole_data)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
def gen_validation_data(project, nsamples=1000, patchSize=29, outPatchSize=1):
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
start_time = time.time()
img_search_string_membraneImages = '%s/*.tif'%(Paths.ValidMembranes)
img_search_string_labelImages = '%s/*.tif'%(Paths.ValidLabels)
img_search_string_grayImages = '%s/*.tif'%(Paths.ValidGray)
img_files_gray = sorted( glob.glob( img_search_string_grayImages ) )
img_files_membrane = sorted( glob.glob( img_search_string_membraneImages ) )
img_files_labels = sorted( glob.glob( img_search_string_labelImages ) )
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil(
(nsamples) / np.float(np.shape(img_files_gray)[0])
))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
img = mahotas.imread(img_files_gray[0])
grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
# read the data
# in random order
read_order = np.random.permutation(np.shape(img_files_gray)[0])
for img_index in read_order:
#print img_files_gray[img_index]
img = mahotas.imread(img_files_gray[img_index])
# normalizes [0,1]
img = normalizeImage(img, doClahe=True)
grayImages[:,:,img_index] = img
membrane_img = mahotas.imread(img_files_membrane[img_index])/255.
membraneImages[:,:,img_index] = membrane_img
maskImages[:,:,img_index] = 1.0
label_img = mahotas.imread(img_files_labels[img_index])
label_img = np.double(label_img)
if label_img.ndim == 3:
label_img = label_img[:,:,0] + 255*label_img[:,:,1] + 255**2 * label_img[:,:,2]
labelImages[:,:,img_index] = label_img
for img_index in xrange(np.shape(img_files_gray)[0]):
#print img_files_gray[read_order[img_index]]
img = grayImages[:,:,img_index]
label_img = labelImages[:,:,img_index]
membrane_img = membraneImages[:,:,img_index]
mask_img = maskImages[:,:,img_index]
#get rid of invalid image borders
mask_img[:,-patchSize:] = 0
mask_img[-patchSize:,:] = 0
valid_indices = np.nonzero(mask_img)
for i in xrange(nsamples_perImage):
if counter >= nsamples:
break
randmem = random.choice(xrange(len(valid_indices[0])))
(row,col) = (valid_indices[0][randmem],
valid_indices[1][randmem])
imgPatch = img[row:row+patchSize, col:col+patchSize]
membranePatch = membrane_img[row:row+patchSize, col:col+patchSize]
labelPatch = label_img[row:row+patchSize, col:col+patchSize]
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
membranePatch = np.fliplr(membranePatch)
labelPatch = np.fliplr(labelPatch)
rotateInt = random.randint(0,3)
imgPatch = np.rot90(imgPatch, rotateInt)
membranePatch = np.rot90(membranePatch, rotateInt)
labelPatch = np.rot90(labelPatch, rotateInt)
labelPatch = relabel(labelPatch)
imgPatch, membranePatch, labelPatch = deform_images(imgPatch, membranePatch, np.uint8(labelPatch))
imgPatch = imgPatch / np.double(np.max(imgPatch))
membranePatch = membranePatch / np.double(np.max(membranePatch))
# crop labelPatch to potentially smaller output size
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
membranePatch = membranePatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
labelPatch = labelPatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = labelPatch.flatten()
whole_set_membranes[counter] = np.int32(membranePatch.flatten() > 0)
counter += 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
membranes = whole_set_membranes.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(membranes)[0])
for i in xrange(np.shape(membranes)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
whole_set_membranes[i,:] = membranes[shuffleIndex[i],:]
data_set = (whole_data, whole_set_membranes, whole_set_labels)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
def gen_training_data(project, purpose='train', nsamples=1000, patchSize=29, outPatchSize=1):
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
print 'gen_data'
if project is None:
return
start_time = time.time()
files_gray = []
files_membranes = []
if purpose == 'train':
images = DB.getTrainingImages( project.id, new=False )
path = Paths.TrainGrayscale
for image in images:
d_path = '%s/%s.tif'%(path, image.id)
m_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id)
if os.path.exists( d_path ) and os.path.exists( m_path ):
'''
# load the annotations
with open( l_path ) as labels_f:
annotations = json.load( labels_f )
# skip if not enough samples in the annotations
sample_sizes = get_sample_sizes( annotations )
if np.sum( sample_sizes ) == 0:
continue
label_sample_sizes = label_sample_sizes + np.array(sample_sizes)
files_gray.append( d_path )
data_labels.append( annotations )
'''
files_gray.append( d_path )
files_membranes.append( m_path )
else:
images = DB.getImages( project.id, purpose=1, new=False, annotated=True )
path = Paths.ValidLabels
files_gray = []
data_labels = []
label_sample_sizes = np.array([ 0, 0])
if len( files_gray ) == 0 or len( data_labels ) == 0 or np.min( label_sample_sizes ) == 0:
return None
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
return data
# changed the patch sampling to use upper left corner instead of middle pixel
# for patch labels it doesn't matter and it makes sampling even and odd patches easier
def oldgenerate_experiment_data_patch_prediction(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1):
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
start_time = time.time()
# pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/'
# pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Thalamus-LGN/Data/25-175_train/'
#pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Cerebellum-P7/Dense/'
pathPrefix = '/n/home00/fgonda/icon/data/reference/'
if not os.path.exists(pathPrefix):
pathPrefix = '/n/pfister_lab/vkaynig/'
#img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif'
img_search_string_membraneImages = pathPrefix + 'labels/membranes/' + purpose + '/*.tif'
img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif'
img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif'
img_files_gray = sorted( glob.glob( img_search_string_grayImages ) )
img_files_membrane = sorted( glob.glob( img_search_string_membraneImages ) )
img_files_labels = sorted( glob.glob( img_search_string_labelImages ) )
print len(img_files_gray)
print len(img_files_membrane)
print len(img_files_labels)
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil(
(nsamples) / np.float(np.shape(img_files_gray)[0])
))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
img = mahotas.imread(img_files_gray[0])
grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
# read the data
# in random order
read_order = np.random.permutation(np.shape(img_files_gray)[0])
for img_index in read_order:
#print img_files_gray[img_index]
img = mahotas.imread(img_files_gray[img_index])
# normalizes [0,1]
img = normalizeImage(img, doClahe=True)
grayImages[:,:,img_index] = img
membrane_img = mahotas.imread(img_files_membrane[img_index])/255.
membraneImages[:,:,img_index] = membrane_img
maskImages[:,:,img_index] = 1.0
if purpose == 'validate':
label_img = mahotas.imread(img_files_labels[img_index])
label_img = np.double(label_img)
if label_img.ndim == 3:
label_img = label_img[:,:,0] + 255*label_img[:,:,1] + 255**2 * label_img[:,:,2]
labelImages[:,:,img_index] = label_img
for img_index in xrange(np.shape(img_files_gray)[0]):
#print img_files_gray[read_order[img_index]]
img = grayImages[:,:,img_index]
label_img = labelImages[:,:,img_index]
membrane_img = membraneImages[:,:,img_index]
mask_img = maskImages[:,:,img_index]
if purpose=='train':
membrane_img = adjust_imprecise_boundaries(img, membrane_img, 0)
#get rid of invalid image borders
mask_img[:,-patchSize:] = 0
mask_img[-patchSize:,:] = 0
valid_indices = np.nonzero(mask_img)
for i in xrange(nsamples_perImage):
if counter >= nsamples:
break
randmem = random.choice(xrange(len(valid_indices[0])))
(row,col) = (valid_indices[0][randmem],
valid_indices[1][randmem])
imgPatch = img[row:row+patchSize, col:col+patchSize]
membranePatch = membrane_img[row:row+patchSize, col:col+patchSize]
labelPatch = label_img[row:row+patchSize, col:col+patchSize]
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
membranePatch = np.fliplr(membranePatch)
if purpose == 'validate':
labelPatch = np.fliplr(labelPatch)
rotateInt = random.randint(0,3)
imgPatch = np.rot90(imgPatch, rotateInt)
membranePatch = np.rot90(membranePatch, rotateInt)
if purpose=='validate':
labelPatch = np.rot90(labelPatch, rotateInt)
if purpose=='validate':
labelPatch = relabel(labelPatch)
imgPatch, membranePatch, labelPatch = deform_images(imgPatch, membranePatch, np.uint8(labelPatch))
else:
imgPatch, membranePatch = deform_images(imgPatch, membranePatch)
imgPatch = imgPatch / np.double(np.max(imgPatch))
membranePatch = membranePatch / np.double(np.max(membranePatch))
# crop labelPatch to potentially smaller output size
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
membranePatch = membranePatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
labelPatch = labelPatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = labelPatch.flatten()
whole_set_membranes[counter] = np.int32(membranePatch.flatten() > 0)
counter += 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
membranes = whole_set_membranes.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(membranes)[0])
for i in xrange(np.shape(membranes)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
whole_set_membranes[i,:] = membranes[shuffleIndex[i],:]
if purpose == 'validate':
data_set = (whole_data, whole_set_membranes, whole_set_labels)
else:
data_set = (whole_data, whole_set_membranes)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
def generate_experiment_data_patch_prediction_layers(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1, nr_layers=3):
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
start_time = time.time()
if os.path.exists('/media/vkaynig/Data1/Cmor_paper_data/'):
pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/'
else:
pathPrefix = '/n/pfister_lab/vkaynig/'
img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif'
img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif'
img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif'
img_files_gray = sorted( glob.glob( img_search_string_grayImages ) )
img_files_membrane = sorted( glob.glob( img_search_string_membraneImages ) )
img_files_labels = sorted( glob.glob( img_search_string_labelImages ) )
whole_set_patches = np.zeros((nsamples, nr_layers, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil(
(nsamples) / np.float(np.shape(img_files_gray)[0])
))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
img = mahotas.imread(img_files_gray[0])
grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
# read the data
# in random order
#read_order = np.random.permutation(np.shape(img_files_gray)[0])
for img_index in range(np.shape(img_files_gray)[0]):
#print img_files_gray[img_index]
img = mahotas.imread(img_files_gray[img_index])
# normalizes [0,1]
img = normalizeImage(img)
grayImages[:,:,img_index] = img
membrane_img = mahotas.imread(img_files_membrane[img_index])/255.
membraneImages[:,:,img_index] = membrane_img
maskImages[:,:,img_index] = 1.0
if purpose == 'validate':
label_img = mahotas.imread(img_files_labels[img_index])
label_img = np.double(label_img)
labelImages[:,:,img_index] = label_img
for img_index in xrange(np.shape(img_files_gray)[0]):
img_cs = int(np.floor(nr_layers/2))
img_valid_range_indices = np.clip(range(img_index-img_cs,img_index+img_cs+1),0,np.shape(img_files_gray)[0]-1)
img = grayImages[:,:,img_valid_range_indices]
label_img = labelImages[:,:,img_index]
membrane_img = membraneImages[:,:,img_index]
mask_img = maskImages[:,:,img_index]
if purpose=='train':
# adjust according to middle image
membrane_img = adjust_imprecise_boundaries(img[:,:,img_cs], membrane_img, 0)
#get rid of invalid image borders
mask_img[:,-patchSize:] = 0
mask_img[-patchSize:,:] = 0
valid_indices = np.nonzero(mask_img)
for i in xrange(nsamples_perImage):
if counter >= nsamples:
break
randmem = random.choice(xrange(len(valid_indices[0])))
(row,col) = (valid_indices[0][randmem],
valid_indices[1][randmem])
imgPatch = img[row:row+patchSize, col:col+patchSize,:]
membranePatch = membrane_img[row:row+patchSize, col:col+patchSize]
labelPatch = label_img[row:row+patchSize, col:col+patchSize]
if random.random() < 0.5:
for flip_i in xrange(nr_layers):
imgPatch[:,:,flip_i] = np.fliplr(imgPatch[:,:,flip_i])
membranePatch = np.fliplr(membranePatch)
if purpose == 'validate':
labelPatch = np.fliplr(labelPatch)
rotateInt = random.randint(0,3)
for rot_i in xrange(nr_layers):
imgPatch[:,:,rot_i] = np.rot90(imgPatch[:,:,rot_i], rotateInt)
membranePatch = np.rot90(membranePatch, rotateInt)
if purpose=='validate':
labelPatch = np.rot90(labelPatch, rotateInt)
if purpose=='validate':
labelPatch = relabel(labelPatch)
deformed_images = deform_images_list(np.dstack([imgPatch*255, np.reshape(membranePatch*255,(patchSize,patchSize,1)), np.uint8(np.reshape(labelPatch,(patchSize,patchSize,1)))]))
imgPatch, membranePatch, labelPatch = np.split(deformed_images,[imgPatch.shape[2],imgPatch.shape[2]+1], axis=2)
else:
deformed_images = deform_images_list(np.dstack([imgPatch*255, np.reshape(membranePatch,(patchSize,patchSize,1))*255]))
imgPatch, membranePatch = np.split(deformed_images,[imgPatch.shape[2]], axis=2)
imgPatch = imgPatch / np.double(np.max(imgPatch))
membranePatch = membranePatch / np.double(np.max(membranePatch))
# crop labelPatch to potentially smaller output size
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
membranePatch = membranePatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
labelPatch = labelPatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
#whole_set_patches = np.zeros((nsamples, nr_layers, patchSize**2), dtype=np.float)
for patch_i in xrange(nr_layers):
whole_set_patches[counter,patch_i,:] = imgPatch[:,:,patch_i].flatten()
whole_set_labels[counter] = labelPatch.flatten()
whole_set_membranes[counter] = np.int32(membranePatch.flatten() > 0)
counter += 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
membranes = whole_set_membranes.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(membranes)[0])
for i in xrange(np.shape(membranes)[0]):
whole_data[i,:,:] = data[shuffleIndex[i],:,:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
whole_set_membranes[i,:] = membranes[shuffleIndex[i],:]
if purpose == 'validate':
data_set = (whole_data, whole_set_membranes, whole_set_labels)
else:
data_set = (whole_data, whole_set_membranes)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
if __name__=="__main__":
import uuid
test = generate_experiment_data_patch_prediction(purpose='train', nsamples=30, patchSize=572, outPatchSize=388)
dir_path = './training_patches/'
for i in xrange(30):
unique_filename = str(uuid.uuid4())
img = np.reshape(test[1][i],(388,388))
img_gray = np.reshape(test[0][i],(572,572))
mahotas.imsave(dir_path+unique_filename+'.tif', np.uint8(img*255))
mahotas.imsave(dir_path+unique_filename+'_gray.tif', np.uint8((img_gray+0.5)*255))
#data_val = generate_experiment_data_supervised(purpose='validate', nsamples=10000, patchSize=65, balanceRate=0.5)
#data = generate_experiment_data_patch_prediction(purpose='validate', nsamples=2, patchSize=315, outPatchSize=215)
# plt.imshow(np.reshape(data[0][0],(315,315))); plt.figure()
# plt.imshow(np.reshape(data[1][0],(215,215))); plt.figure()
# plt.imshow(np.reshape(data[2][0],(215,215))); plt.show()
# image = mahotas.imread('ac3_input_0141.tif')
# image = normalizeImage(image)
# label = mahotas.imread('ac3_labels_0141.tif') / 255.
# test = adjust_imprecise_boundaries(image, label, 10)
# plt.imshow(label+image); plt.show()
# plt.imshow(test+image); plt.show()
| mit |
ywcui1990/nupic.research | projects/l2_pooling/noise_tolerance_l2.py | 9 | 19188 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016 - 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Test the noise tolerance of Layer 2 in isolation.
Perform an experiment to see if L2 eventually recognizes an object.
Test with various noise levels and with various column counts and synapse
sample sizes.
"""
from collections import defaultdict
import json
import math
import random
import os
import time
import matplotlib.pyplot as plt
import numpy as np
from htmresearch.algorithms.column_pooler import ColumnPooler
from htmresearch.frameworks.layers.sensor_placement import greedySensorPositions
L4_CELL_COUNT = 8*1024
def createRandomObjectDescriptions(numObjects,
numLocationsPerObject,
featurePool=("A", "B", "C")):
"""
Returns {"Object 1": [(0, "C"), (1, "B"), (2, "C"), ...],
"Object 2": [(0, "C"), (1, "A"), (2, "B"), ...]}
"""
return dict(("Object %d" % i,
zip(xrange(numLocationsPerObject),
[random.choice(featurePool)
for _ in xrange(numLocationsPerObject)]))
for i in xrange(1, numObjects + 1))
def noisy(pattern, noiseLevel, totalNumCells):
"""
Generate a noisy copy of a pattern.
Given number of active bits w = len(pattern),
deactivate noiseLevel*w cells, and activate noiseLevel*w other cells.
@param pattern (set)
A set of active indices
@param noiseLevel (float)
The percentage of the bits to shuffle
@param totalNumCells (int)
The number of cells in the SDR, active and inactive
@return (numpy array)
A noisy list of active indices
"""
n = int(noiseLevel * len(pattern))
noised = set(pattern)
noised.difference_update(random.sample(noised, n))
for _ in xrange(n):
while True:
v = random.randint(0, totalNumCells - 1)
if v not in pattern and v not in noised:
noised.add(v)
break
return np.array(sorted(noised), dtype="uint32")
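# Usage sketch for noisy (hypothetical SDR): with a 40-bit pattern and noiseLevel=0.25,
# 10 of the original bits are dropped and 10 previously inactive bits are switched on,
# so the result still has 40 active bits but only 75% overlap with the original:
#
#   sdr = set(random.sample(xrange(L4_CELL_COUNT), 40))
#   corrupted = noisy(sdr, 0.25, L4_CELL_COUNT)
#   # len(corrupted) == 40 and len(set(corrupted) & sdr) == 30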
def doExperiment(numColumns, l2Overrides, objectDescriptions, noiseMu,
noiseSigma, numInitialTraversals, noiseEverywhere):
"""
Touch every point on an object 'numInitialTraversals' times, then evaluate
whether it has inferred the object by touching every point once more and
checking the number of correctly active and incorrectly active cells.
@param numColumns (int)
The number of sensors to use
@param l2Overrides (dict)
Parameters for the ColumnPooler
@param objectDescriptions (dict)
A mapping of object names to their feature-locations.
See 'createRandomObjectDescriptions'.
@param noiseMu (float)
The average amount of noise in a feedforward input. The noise level for each
column's input is determined once per touch. It is a gaussian distribution
with mean 'noiseMu' and sigma 'noiseSigma'.
@param noiseSigma (float)
The sigma for the gaussian distribution of noise levels. If the noiseSigma is
0, then the noise level will always be 'noiseMu'.
@param numInitialTraversals (int)
The number of times to traverse the object before testing whether the object
has been inferred.
@param noiseEverywhere (bool)
If true, add noise to every column's input, and record accuracy of every
column. If false, add noise to one column's input, and only record accuracy
of that column.
"""
# For each column, keep a mapping from feature-location names to their SDRs
layer4sdr = lambda : np.array(sorted(random.sample(xrange(L4_CELL_COUNT),
40)), dtype="uint32")
featureLocationSDRs = [defaultdict(layer4sdr) for _ in xrange(numColumns)]
params = {"inputWidth": L4_CELL_COUNT,
"lateralInputWidths": [4096]*(numColumns-1),
"seed": random.randint(0, 1024)}
params.update(l2Overrides)
l2Columns = [ColumnPooler(**params)
for _ in xrange(numColumns)]
# Learn the objects
objectL2Representations = {}
for objectName, featureLocations in objectDescriptions.iteritems():
for featureLocationName in featureLocations:
# Touch it enough times for the distal synapses to reach the
# connected permanence, and then once more.
for _ in xrange(4):
allLateralInputs = [l2.getActiveCells() for l2 in l2Columns]
for columnNumber, l2 in enumerate(l2Columns):
feedforwardInput = featureLocationSDRs[columnNumber][featureLocationName]
lateralInputs = [lateralInput
for i, lateralInput in enumerate(allLateralInputs)
if i != columnNumber]
l2.compute(feedforwardInput, lateralInputs, learn=True)
objectL2Representations[objectName] = [set(l2.getActiveCells())
for l2 in l2Columns]
for l2 in l2Columns:
l2.reset()
results = []
# Try to infer the objects
for objectName, featureLocations in objectDescriptions.iteritems():
for l2 in l2Columns:
l2.reset()
sensorPositionsIterator = greedySensorPositions(numColumns, len(featureLocations))
# Touch each location at least numInitialTouches times, and then touch it
# once more, testing it. For each traversal, touch each point on the object
# ~once. Not once per sensor -- just once. So we translate the "number of
# traversals" into a "number of touches" according to the number of sensors.
numTouchesPerTraversal = len(featureLocations) / float(numColumns)
numInitialTouches = int(math.ceil(numInitialTraversals * numTouchesPerTraversal))
if noiseEverywhere:
numTestTouches = int(math.ceil(1 * numTouchesPerTraversal))
else:
numTestTouches = len(featureLocations)
for touch in xrange(numInitialTouches + numTestTouches):
sensorPositions = next(sensorPositionsIterator)
# Give the system a few timesteps to settle, allowing lateral connections
# to cause cells to be inhibited.
for _ in xrange(3):
allLateralInputs = [l2.getActiveCells() for l2 in l2Columns]
for columnNumber, l2 in enumerate(l2Columns):
position = sensorPositions[columnNumber]
featureLocationName = featureLocations[position]
feedforwardInput = featureLocationSDRs[columnNumber][featureLocationName]
if noiseEverywhere or columnNumber == 0:
noiseLevel = random.gauss(noiseMu, noiseSigma)
noiseLevel = max(0.0, min(1.0, noiseLevel))
feedforwardInput = noisy(feedforwardInput, noiseLevel, L4_CELL_COUNT)
lateralInputs = [lateralInput
for i, lateralInput in enumerate(allLateralInputs)
if i != columnNumber]
l2.compute(feedforwardInput, lateralInputs, learn=False)
if touch >= numInitialTouches:
if noiseEverywhere:
for columnNumber, l2 in enumerate(l2Columns):
activeCells = set(l2.getActiveCells())
correctCells = objectL2Representations[objectName][columnNumber]
results.append((len(activeCells & correctCells),
len(activeCells - correctCells)))
else:
activeCells = set(l2Columns[0].getActiveCells())
correctCells = objectL2Representations[objectName][0]
results.append((len(activeCells & correctCells),
len(activeCells - correctCells)))
return results
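# Usage sketch for doExperiment (small hypothetical configuration). Each entry of the
# returned list is a (correctly active cells, incorrectly active cells) pair recorded
# during the test touches:
#
#   objects = createRandomObjectDescriptions(numObjects=5, numLocationsPerObject=5)
#   stats = doExperiment(numColumns=2, l2Overrides={"sampleSizeDistal": 20},
#                        objectDescriptions=objects, noiseMu=0.2, noiseSigma=0.0,
#                        numInitialTraversals=3, noiseEverywhere=True)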
def plotSuccessRate_varyNumColumns(noiseSigma, noiseEverywhere):
"""
Run and plot the experiment, varying the number of cortical columns.
"""
#
# Run the experiment
#
noiseLevels = [x * 0.01 for x in xrange(0, 101, 5)]
l2Overrides = {"sampleSizeDistal": 20}
columnCounts = [1, 2, 3, 4]
results = defaultdict(list)
for trial in xrange(1):
print "trial", trial
objectDescriptions = createRandomObjectDescriptions(10, 10)
for numColumns in columnCounts:
print "numColumns", numColumns
for noiseLevel in noiseLevels:
r = doExperiment(numColumns, l2Overrides, objectDescriptions,
noiseLevel, noiseSigma, numInitialTraversals=6,
noiseEverywhere=noiseEverywhere)
results[(numColumns, noiseLevel)].extend(r)
#
# Plot it
#
numCorrectActiveThreshold = 30
numIncorrectActiveThreshold = 10
plt.figure()
colors = dict(zip(columnCounts,
('r', 'k', 'g', 'b')))
markers = dict(zip(columnCounts,
('o', '*', 'D', 'x')))
for numColumns in columnCounts:
y = []
for noiseLevel in noiseLevels:
trials = results[(numColumns, noiseLevel)]
numPassed = len([True for numCorrect, numIncorrect in trials
if numCorrect >= numCorrectActiveThreshold
and numIncorrect <= numIncorrectActiveThreshold])
y.append(numPassed / float(len(trials)))
plt.plot(noiseLevels, y,
color=colors[numColumns],
marker=markers[numColumns])
lgnd = plt.legend(["%d columns" % numColumns
for numColumns in columnCounts],
bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.xlabel("Mean feedforward noise level")
plt.xticks([0.01 * n for n in xrange(0, 101, 10)])
plt.ylabel("Success rate")
plt.yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
plt.title("Inference with normally distributed noise (stdev=%.2f)" % noiseSigma)
plotPath = os.path.join("plots",
"successRate_varyColumnCount_sigma%.2f_%s.pdf"
% (noiseSigma, time.strftime("%Y%m%d-%H%M%S")))
plt.savefig(plotPath, bbox_extra_artists=(lgnd,), bbox_inches="tight")
print "Saved file %s" % plotPath
def plotSuccessRate_varyDistalSampleSize(noiseSigma, noiseEverywhere):
"""
Run and plot the experiment, varying the distal sample size.
"""
#
# Run the experiment
#
noiseLevels = [x * 0.01 for x in xrange(0, 101, 5)]
sampleSizes = [13, 20, 30, 40]
numColumns = 3
results = defaultdict(list)
for trial in xrange(1):
print "trial", trial
objectDescriptions = createRandomObjectDescriptions(10, 10)
for sampleSizeDistal in sampleSizes:
print "sampleSizeDistal", sampleSizeDistal
l2Overrides = {"sampleSizeDistal": sampleSizeDistal}
for noiseLevel in noiseLevels:
r = doExperiment(numColumns, l2Overrides, objectDescriptions,
noiseLevel, noiseSigma, numInitialTraversals=6,
noiseEverywhere=noiseEverywhere)
results[(sampleSizeDistal, noiseLevel)].extend(r)
#
# Plot it
#
numCorrectActiveThreshold = 30
numIncorrectActiveThreshold = 10
plt.figure()
colorList = dict(zip(sampleSizes,
('r', 'k', 'g', 'b')))
markerList = dict(zip(sampleSizes,
('o', '*', 'D', 'x')))
for sampleSizeDistal in sampleSizes:
y = []
for noiseLevel in noiseLevels:
trials = results[(sampleSizeDistal, noiseLevel)]
numPassed = len([True for numCorrect, numIncorrect in trials
if numCorrect >= numCorrectActiveThreshold
and numIncorrect <= numIncorrectActiveThreshold])
y.append(numPassed / float(len(trials)))
plt.plot(noiseLevels, y,
color=colorList[sampleSizeDistal],
marker=markerList[sampleSizeDistal])
lgnd = plt.legend(["Distal sample size %d" % sampleSizeDistal
for sampleSizeDistal in sampleSizes],
bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.xlabel("Mean feedforward noise level")
plt.xticks([0.01 * n for n in xrange(0, 101, 10)])
plt.ylabel("Success rate")
plt.yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
plt.title("Inference with normally distributed noise (stdev=%.2f)" % noiseSigma)
plotPath = os.path.join("plots",
"successRate_varyDistalSampleSize_sigma%.2f_%s.pdf"
% (noiseSigma, time.strftime("%Y%m%d-%H%M%S")))
plt.savefig(plotPath, bbox_extra_artists=(lgnd,), bbox_inches="tight")
print "Saved file %s" % plotPath
def plotSuccessRate_varyProximalSampleSize(noiseSigma, noiseEverywhere):
"""
Run and plot the experiment, varying the proximal sample size.
"""
#
# Run the experiment
#
noiseLevels = [x * 0.01 for x in xrange(0, 101, 5)]
sampleSizes = [13, 20, 30, 40]
numColumns = 3
results = defaultdict(list)
for trial in xrange(1):
print "trial", trial
objectDescriptions = createRandomObjectDescriptions(10, 10)
for sampleSizeProximal in sampleSizes:
print "sampleSizeProximal", sampleSizeProximal
l2Overrides = {"sampleSizeProximal": sampleSizeProximal}
for noiseLevel in noiseLevels:
r = doExperiment(numColumns, l2Overrides, objectDescriptions,
noiseLevel, noiseSigma, numInitialTraversals=6,
noiseEverywhere=noiseEverywhere)
results[(sampleSizeProximal, noiseLevel)].extend(r)
#
# Plot it
#
numCorrectActiveThreshold = 30
numIncorrectActiveThreshold = 10
plt.figure()
colorList = dict(zip(sampleSizes,
('r', 'k', 'g', 'b')))
markerList = dict(zip(sampleSizes,
('o', '*', 'D', 'x')))
for sampleSizeProximal in sampleSizes:
y = []
for noiseLevel in noiseLevels:
trials = results[(sampleSizeProximal, noiseLevel)]
numPassed = len([True for numCorrect, numIncorrect in trials
if numCorrect >= numCorrectActiveThreshold
and numIncorrect <= numIncorrectActiveThreshold])
y.append(numPassed / float(len(trials)))
plt.plot(noiseLevels, y,
color=colorList[sampleSizeProximal],
marker=markerList[sampleSizeProximal])
lgnd = plt.legend(["Proximal sample size %d" % sampleSizeProximal
for sampleSizeProximal in sampleSizes],
bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.xlabel("Mean feedforward noise level")
plt.xticks([0.01 * n for n in xrange(0, 101, 10)])
plt.ylabel("Success rate")
plt.yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
plt.title("Inference with normally distributed noise (stdev=%.2f)" % noiseSigma)
plotPath = os.path.join("plots",
"successRate_varyProximalSampleSize_sigma%.2f_%s.pdf"
% (noiseSigma, time.strftime("%Y%m%d-%H%M%S")))
plt.savefig(plotPath, bbox_extra_artists=(lgnd,), bbox_inches="tight")
print "Saved file %s" % plotPath
def logCellActivity_varyNumColumns(noiseSigma, noiseEverywhere):
"""
Run the experiment, varying the column counts, and save each
[# correctly active cells, # incorrectly active cells]
pair to a JSON file that can be visualized.
"""
noiseLevels = [0.30, 0.40, 0.45, 0.50, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90]
l2Overrides = {"sampleSizeDistal": 20}
columnCounts = [1, 2, 3, 4, 5]
results = defaultdict(list)
for trial in xrange(1):
print "trial", trial
objectDescriptions = createRandomObjectDescriptions(10, 10)
for numColumns in columnCounts:
print "numColumns", numColumns
for noiseLevel in noiseLevels:
r = doExperiment(numColumns, l2Overrides, objectDescriptions,
noiseLevel, noiseSigma, numInitialTraversals=6,
noiseEverywhere=noiseEverywhere)
results[(numColumns, noiseLevel)].extend(r)
d = []
for (numColumns, noiseLevel), cellCounts in results.iteritems():
d.append({"numColumns": numColumns,
"noiseLevel": noiseLevel,
"results": cellCounts})
filename = os.path.join("plots",
"varyColumns_sigma%.2f_%s.json"
% (noiseSigma, time.strftime("%Y%m%d-%H%M%S")))
with open(filename, "w") as fout:
json.dump(d, fout)
print "Wrote to", filename
print "Visualize this file at: http://numenta.github.io/htmresearch/visualizations/grid-of-scatterplots/L2-columns-with-noise.html"
if __name__ == "__main__":
# Plot the accuracy of inference when noise is added, varying the number of
# cortical columns. We find that when noise is applied at a constant equal
# rate for each column, the accuracy only improves slightly with more cortical
# columns.
plotSuccessRate_varyNumColumns(noiseSigma=0.0, noiseEverywhere=True)
# Change noise to a Gaussian random variable that is independently applied to
# different columns. We find that the accuracy now improves with more cortical
# columns. This means that noisy sensors benefit from having lateral input
# from non-noisy sensors. The sensors that happen to have high noise levels
# take advantage of the sensors that happen to have low noise levels, so the
# array as a whole can partially guard itself from noise.
plotSuccessRate_varyNumColumns(noiseSigma=0.1, noiseEverywhere=True)
plotSuccessRate_varyNumColumns(noiseSigma=0.2, noiseEverywhere=True)
# Plot the accuracy of inference when noise is added, varying the ratio of the
# proximal threshold to the proximal synapse sample size. We find that this
# ratio does more than any other parameter to determine at what noise level
# the accuracy drop-off occurs.
plotSuccessRate_varyProximalSampleSize(noiseSigma=0.1, noiseEverywhere=True)
# Plot the accuracy of inference when noise is added, varying the ratio of the
# distal segment activation threshold to the distal synapse sample size. We
# find that increasing this ratio provides additional noise tolerance on top
# of the noise tolerance provided by proximal connections.
plotSuccessRate_varyDistalSampleSize(noiseSigma=0.1, noiseEverywhere=True)
# Observe the impact of columns without noisy input on columns with noisy
# input. Add constant noise to one column's input, and don't add noise for the
# other columns. Observe what happens as more non-noisy columns are added. We
# find that the lateral input from other columns can help correctly active
# cells inhibit cells that shouldn't be active, but it doesn't help increase
# the number of correctly active cells. So the accuracy of inference is
# improved, but the confidence of the inference isn't.
logCellActivity_varyNumColumns(noiseSigma=0.0, noiseEverywhere=False)
| agpl-3.0 |
lhilt/scipy | scipy/interpolate/interpolate.py | 4 | 97600 | from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
import warnings
import functools
import operator
import numpy as np
from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
ravel, poly1d, asarray, intp)
import scipy.special as spec
from scipy.special import comb
from scipy._lib.six import xrange, integer_types, string_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline, BSpline
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
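# Minimal illustration of prod (assumed inputs); reducing with operator.mul over a plain
# Python tuple avoids np.prod's array-conversion overhead, which is what the docstring's
# speed note refers to:
#
#   prod((2, 3, 4))   # -> 24
#   prod(())          # -> 1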
def lagrange(x, w):
r"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : `numpy.poly1d` instance
The Lagrange interpolating polynomial.
Examples
--------
Interpolate :math:`f(x) = x^3` by 3 points.
>>> from scipy.interpolate import lagrange
>>> x = np.array([0, 1, 2])
>>> y = x**3
>>> poly = lagrange(x, y)
Since there are only 3 points, the Lagrange polynomial has degree 2. Explicitly,
it is given by
.. math::
\begin{aligned}
L(x) &= 1\times \frac{x (x - 2)}{-1} + 8\times \frac{x (x-1)}{2} \\
&= x (-2 + 3x)
\end{aligned}
>>> from numpy.polynomial.polynomial import Polynomial
>>> Polynomial(poly).coef
array([ 3., -2., 0.])
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Note that calling `interp2d` with NaNs present in input values results in
undefined behaviour.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated via nearest-neighbor extrapolation.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : One-dimensional version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
def _do_extrapolate(fill_value):
"""Helper to check if fill_value == "extrapolate" without warnings"""
return (isinstance(fill_value, string_types) and
fill_value == 'extrapolate')
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Note that calling `interp1d` with NaNs present in input values results in
undefined behaviour.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'previous', 'next', where 'zero', 'slinear', 'quadratic' and 'cubic'
refer to a spline interpolation of zeroth, first, second or third
order; 'previous' and 'next' simply return the previous or next value
of the point) or as an integer specifying the order of the spline
interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised unless ``fill_value="extrapolate"``.
fill_value : array-like or (array-like, array_like) or "extrapolate", optional
- if a ndarray (or float), this value will be used to fill in for
requested points outside of the data range. If not provided, then
the default is NaN. The array-like must broadcast properly to the
dimensions of the non-interpolation axes.
- If a two-element tuple, then the first element is used as a
fill value for ``x_new < x[0]`` and the second element is used for
``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
list or ndarray, regardless of shape) is taken to be a single
array-like argument meant to be used for both bounds as
``below, above = fill_value, fill_value``.
.. versionadded:: 0.17.0
- If "extrapolate", then points outside the data range will be
extrapolated.
.. versionadded:: 0.17.0
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Attributes
----------
fill_value
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
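    A rough additional sketch (not part of the upstream examples) of the
    two-sided `fill_value` behaviour; the fill values below are arbitrary:
    >>> g = interpolate.interp1d(x, y, bounds_error=False,
    ...                          fill_value=(y[0], 0.0))
    >>> float(g(-1.0)), float(g(20.0))   # below -> y[0], above -> 0.0
    (1.0, 0.0)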
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=None, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.bounds_error = bounds_error # used by fill_value setter
self.copy = copy
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'zero': 0, 'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest', 'previous', 'next'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
self._y = self._reshape_yi(self.y)
self.x = x
del y, x # clean up namespace to prevent misuse; use attributes
self._kind = kind
self.fill_value = fill_value # calls the setter, can modify bounds_err
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: https://docs.python.org/reference/datamodel.html
if kind in ('linear', 'nearest', 'previous', 'next'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
# Do division before addition to prevent possible integer
# overflow
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
self._call = self.__class__._call_nearest
elif kind == 'previous':
# Side for np.searchsorted and index for clipping
self._side = 'left'
self._ind = 0
# Move x by one floating point value to the left
self._x_shift = np.nextafter(self.x, -np.inf)
self._call = self.__class__._call_previousnext
elif kind == 'next':
self._side = 'right'
self._ind = 1
# Move x by one floating point value to the right
self._x_shift = np.nextafter(self.x, np.inf)
self._call = self.__class__._call_previousnext
else:
# Check if we can delegate to numpy.interp (2x-10x faster).
cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
cond = cond and self.y.ndim == 1
cond = cond and not _do_extrapolate(fill_value)
if cond:
self._call = self.__class__._call_linear_np
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
rewrite_nan = False
xx, yy = self.x, self._y
if order > 1:
# Quadratic or cubic spline. If input contains even a single
# nan, then the output is all nans. We cannot just feed data
# with nans to make_interp_spline because it calls LAPACK.
# So, we make up a bogus x and y with no nans and use it
# to get the correct shape of the output, which we then fill
# with nans.
# For slinear or zero order spline, we just pass nans through.
if np.isnan(self.x).any():
xx = np.linspace(min(self.x), max(self.x), len(self.x))
rewrite_nan = True
if np.isnan(self._y).any():
yy = np.ones_like(self._y)
rewrite_nan = True
self._spline = make_interp_spline(xx, yy, k=order,
check_finite=False)
if rewrite_nan:
self._call = self.__class__._call_nan_spline
else:
self._call = self.__class__._call_spline
if len(self.x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
@property
def fill_value(self):
"""The fill value."""
# backwards compat: mimic a public attribute
return self._fill_value_orig
@fill_value.setter
def fill_value(self, fill_value):
# extrapolation only works for nearest neighbor and linear methods
if _do_extrapolate(fill_value):
if self.bounds_error:
raise ValueError("Cannot extrapolate and raise "
"at the same time.")
self.bounds_error = False
self._extrapolate = True
else:
broadcast_shape = (self.y.shape[:self.axis] +
self.y.shape[self.axis + 1:])
if len(broadcast_shape) == 0:
broadcast_shape = (1,)
# it's either a pair (_below_range, _above_range) or a single value
# for both above and below range
if isinstance(fill_value, tuple) and len(fill_value) == 2:
below_above = [np.asarray(fill_value[0]),
np.asarray(fill_value[1])]
names = ('fill_value (below)', 'fill_value (above)')
for ii in range(2):
below_above[ii] = _check_broadcast_up_to(
below_above[ii], broadcast_shape, names[ii])
else:
fill_value = np.asarray(fill_value)
below_above = [_check_broadcast_up_to(
fill_value, broadcast_shape, 'fill_value')] * 2
self._fill_value_below, self._fill_value_above = below_above
self._extrapolate = False
if self.bounds_error is None:
self.bounds_error = True
# backwards compat: fill_value was a public attr; make it writeable
self._fill_value_orig = fill_value
def _call_linear_np(self, x_new):
# Note that out-of-bounds values are taken care of in self._evaluate
return np.interp(x_new, self.x, self.y)
def _call_linear(self, x_new):
# 2. Find where in the original data, the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_previousnext(self, x_new):
"""Use previous/next neighbour of x_new, y_new = f(x_new)."""
# 1. Get index of left/right value
x_new_indices = searchsorted(self._x_shift, x_new, side=self._side)
# 2. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(1-self._ind,
len(self.x)-self._ind).astype(intp)
# 3. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices+self._ind-1]
return y_new
def _call_spline(self, x_new):
return self._spline(x_new)
def _call_nan_spline(self, x_new):
out = self._spline(x_new)
out[...] = np.nan
return out
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return a list of mask array indicating the outofbounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
y_new = self._call(self, x_new)
if not self._extrapolate:
below_bounds, above_bounds = self._check_bounds(x_new)
if len(y_new) > 0:
# Note fill_value must be broadcast up to the proper size
# and flattened to work here
y_new[below_bounds] = self._fill_value_below
y_new[above_bounds] = self._fill_value_above
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
return below_bounds, above_bounds
class _PPolyBase(object):
"""Base class for piecewise polynomials."""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
elif extrapolate != 'periodic':
extrapolate = bool(extrapolate)
self.extrapolate = extrapolate
if self.c.ndim < 2:
raise ValueError("Coefficients array must be at least "
"2-dimensional.")
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("axis=%s must be between 0 and %s" %
(axis, self.c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
dx = np.diff(self.x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` must be strictly increasing or decreasing.")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
``c`` and ``x`` must be arrays of the correct shape and type. The
``c`` array can only be of dtypes float and complex, and ``x``
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=None):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals. Note that
the first additional interval will be formed using one of the
``self.x`` end points.
x : ndarray, size (m,)
Additional breakpoints. Must be sorted in the same order as
``self.x`` and either to the right or to the left of the current
breakpoints.
right
Deprecated argument. Has no effect.
.. deprecated:: 0.19
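        Examples
        --------
        A minimal, illustrative sketch (not from the upstream documentation);
        the pieces below are arbitrary:
        >>> import numpy as np
        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[1.0], [0.0]]), [0.0, 1.0])       # x on [0, 1]
        >>> pp.extend(np.array([[0.0], [1.0]]), np.array([2.0]))   # constant 1 on [1, 2]
        >>> pp([0.5, 1.5]).tolist()
        [0.5, 1.0]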
"""
if right is not None:
warnings.warn("`right` is deprecated and will be removed.")
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if c.size == 0:
return
dx = np.diff(x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` is not sorted.")
if self.x[-1] >= self.x[0]:
if not x[-1] >= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] >= self.x[-1]:
action = 'append'
elif x[-1] <= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
else:
if not x[-1] <= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] <= self.x[-1]:
action = 'append'
elif x[-1] >= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if action == 'append':
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
elif action == 'prepend':
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative.
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
# With periodic extrapolation we map x to the segment
# [self.x[0], self.x[-1]].
if extrapolate == 'periodic':
x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
extrapolate = False
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
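    Examples
    --------
    An illustrative sketch (not taken from the upstream documentation): a
    piecewise-linear "tent" built from local power-basis coefficients.
    >>> import numpy as np
    >>> from scipy.interpolate import PPoly
    >>> c = np.array([[1.0, -1.0],    # slope on [0, 1) and on [1, 2]
    ...               [0.0, 1.0]])    # value at the left breakpoint
    >>> pp = PPoly(c, [0, 1, 2])
    >>> pp([0.5, 1.5]).tolist()
    [0.5, 0.5]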
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - n representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
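        Examples
        --------
        An illustrative sketch (not from the upstream documentation); the
        quadratic below is arbitrary:
        >>> import numpy as np
        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[2.0], [0.0], [1.0]]), [0.0, 1.0])  # 2*x**2 + 1
        >>> pp.derivative()(0.5).tolist()                            # 4*x at x = 0.5
        2.0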
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu, :].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
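        Examples
        --------
        An illustrative sketch (not from the upstream documentation); the
        constant piece below is arbitrary:
        >>> import numpy as np
        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[3.0]]), [0.0, 2.0])     # S(x) = 3
        >>> pp.antiderivative()(2.0).tolist()             # 3*x at x = 2
        6.0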
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
# construct a compatible polynomial
return self.construct_fast(c, self.x, extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
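        Examples
        --------
        An illustrative sketch (not from the upstream documentation): the
        integral of ``x`` over ``[0, 2]`` represented as a single linear piece.
        >>> import numpy as np
        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[1.0], [0.0]]), [0.0, 2.0])   # S(x) = x
        >>> float(pp.integrate(0, 2))
        2.0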
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
# Compute the integral.
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xe, False, out=range_int)
range_int *= n_periods
else:
range_int.fill(0)
# Map a to [xs, xe], b is always a + left.
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
remainder_int = np.empty_like(range_int)
if b <= xe:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, xe, False, out=remainder_int)
range_int += remainder_int
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xs + left + a - xe, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate), out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def solve(self, y=0., discontinuity=True, extrapolate=None):
"""
        Find real solutions of the equation ``pp(x) == y``.
Parameters
----------
y : float, optional
Right-hand side. Default is zero.
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
            return value is an object array in which each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
        if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.solve()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
y = float(y)
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, y, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
        Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
            return value is an object array in which each element is an
ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep` or a BSpline object.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
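        Examples
        --------
        A minimal sketch (not part of the upstream documentation), assuming an
        interpolating cubic spline fit obtained with `splrep`:
        >>> import numpy as np
        >>> from scipy.interpolate import splrep, splev, PPoly
        >>> x = np.linspace(0, 10, 11)
        >>> tck = splrep(x, np.sin(x), s=0)
        >>> pp = PPoly.from_spline(tck)
        >>> xs = np.linspace(0.5, 9.5, 19)
        >>> bool(np.allclose(pp(xs), splev(xs, tck)))
        True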
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
if extrapolate is None:
extrapolate = tck.extrapolate
else:
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
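        Examples
        --------
        A minimal, illustrative sketch (not from the upstream documentation):
        the conversion preserves the values of the polynomial.
        >>> import numpy as np
        >>> from scipy.interpolate import BPoly, PPoly
        >>> bp = BPoly(np.array([[1.0], [2.0], [3.0]]), [0, 1])
        >>> pp = PPoly.from_bernstein_basis(bp)
        >>> bool(np.allclose(pp([0.2, 0.7]), bp([0.2, 0.7])))
        True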
"""
if not isinstance(bp, BPoly):
raise TypeError(".from_bernstein_basis only accepts BPoly instances. "
"Got %s instead." % type(bp))
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**a * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""Piecewise polynomial in terms of coefficients and breakpoints.
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
coefficient.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature,
see for example [1]_ [2]_ [3]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
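    As a small, illustrative check (not part of the upstream text), the value
    at the midpoint of the interval follows from the expansion above:
    >>> float(bp(0.5))
    2.0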
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k - nu representing the derivative of
this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k + nu representing the
antiderivative of this polynomial.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the
# breakpoint). Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(c2, x, extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. If 'periodic', periodic
extrapolation is used. If None (default), use `self.extrapolate`.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is None:
extrapolate = self.extrapolate
# ib.extrapolate shouldn't be 'periodic', it is converted to
        # False for 'periodic' in the antiderivative() call.
if extrapolate != 'periodic':
ib.extrapolate = extrapolate
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
# For simplicity and clarity convert to a <= b case.
if a <= b:
sign = 1
else:
a, b = b, a
sign = -1
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
res = n_periods * (ib(xe) - ib(xs))
# Map a and b to [xs, xe].
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
if b <= xe:
res += ib(b) - ib(a)
else:
res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
return sign * res
else:
return ib(b) - ib(a)
def extend(self, c, x, right=None):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
if not isinstance(pp, PPoly):
raise TypeError(".from_power_basis only accepts PPoly instances. "
"Got %s instead." % type(pp))
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
``x = 1`` and ``x = 2``.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
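        As an extra, illustrative check (not part of the upstream examples),
        the first example indeed matches the requested values and derivatives:
        >>> bp = BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
        >>> float(bp(0)), float(bp(1))
        (1.0, 3.0)
        >>> d = bp.derivative()
        >>> round(float(d(0)), 12), round(float(d(1)), 12)
        (2.0, 4.0)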
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
        if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, (integer_types, np.integer)):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
                n1 = min(n - n2, len(y1))
if n1+n2 != n:
mesg = ("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" % (
xi[i], len(y1), xi[i+1], len(y2), orders[i]))
raise ValueError(mesg)
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
r"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on ``[xa, xb]`` and having the values and derivatives at the
        endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of `ya` and `yb` are `na` and `nb`, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at `xa`. `ya[0]` is the value of the function, and
`ya[i]` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at `xb`.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
        This uses several facts about Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}.
Iterating the latter one, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
.. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At ``x = xb`` it's the same with ``a = n - q``.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating) or
np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
r"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
class NdPPoly(object):
"""
Piecewise tensor product polynomial
The value at point ``xp = (x', y', z', ...)`` is evaluated by first
computing the interval indices `i` such that::
x[0][i[0]] <= x' < x[0][i[0]+1]
x[1][i[1]] <= y' < x[1][i[1]+1]
...
and then computing::
S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
* (xp[0] - x[0][i[0]])**m0
* ...
* (xp[n] - x[n][i[n]])**mn
for m0 in range(k[0]+1)
...
for mn in range(k[n]+1))
where ``k[j]`` is the degree of the polynomial in dimension j. This
representation is the piecewise multivariate power basis.
Parameters
----------
c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
Polynomial coefficients, with polynomial order `kj` and
`mj+1` intervals for each dimension `j`.
x : ndim-tuple of ndarrays, shapes (mj+1,)
Polynomial breakpoints for each dimension. These must be
sorted in increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : tuple of ndarrays
Breakpoints.
c : ndarray
Coefficients of the polynomials.
Methods
-------
__call__
construct_fast
See also
--------
PPoly : piecewise polynomials in 1D
Notes
-----
High-order polynomials in the power basis can be numerically
unstable.
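    Examples
    --------
    An illustrative sketch (not part of the upstream documentation): the
    bilinear product ``p(x, y) = x * y`` on the unit square, stored as a
    single tensor-product piece.
    >>> import numpy as np
    >>> from scipy.interpolate import NdPPoly
    >>> c = np.zeros((2, 2, 1, 1))
    >>> c[0, 0, 0, 0] = 1.0          # coefficient of (x - 0)**1 * (y - 0)**1
    >>> p = NdPPoly(c, ([0.0, 1.0], [0.0, 1.0]))
    >>> p(np.array([[0.25, 0.5]])).tolist()
    [0.125]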
"""
def __init__(self, c, x, extrapolate=None):
self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
self.c = np.asarray(c)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
ndim = len(self.x)
if any(v.ndim != 1 for v in self.x):
raise ValueError("x arrays must all be 1-dimensional")
if any(v.size < 2 for v in self.x):
raise ValueError("x arrays must all contain at least 2 points")
if c.ndim < 2*ndim:
raise ValueError("c must have at least 2*len(x) dimensions")
if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
raise ValueError("x-coordinates are not in increasing order")
if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
raise ValueError("x and c do not agree on the number of intervals")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
``c`` and ``x`` must be arrays of the correct shape and type. The
``c`` array can only be of dtypes float and complex, and ``x``
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _ensure_c_contiguous(self):
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
if not isinstance(self.x, tuple):
self.x = tuple(self.x)
def __call__(self, x, nu=None, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : tuple, optional
Orders of derivatives to evaluate. Each must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
x = _ndim_coords_from_arrays(x)
x_shape = x.shape
x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
if nu is None:
nu = np.zeros((ndim,), dtype=np.intc)
else:
nu = np.asarray(nu, dtype=np.intc)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError("invalid number of derivative orders nu")
dim1 = prod(self.c.shape[:ndim])
dim2 = prod(self.c.shape[ndim:2*ndim])
dim3 = prod(self.c.shape[2*ndim:])
ks = np.array(self.c.shape[:ndim], dtype=np.intc)
out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
self.x,
ks,
x,
nu,
bool(extrapolate),
out)
return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
def _derivative_inplace(self, nu, axis):
"""
Compute 1D derivative along a selected dimension in-place
        May result in a non-contiguous c array.
"""
if nu < 0:
return self._antiderivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
# reduce order
if nu == 0:
# noop
return
else:
sl = [slice(None)]*ndim
sl[axis] = slice(None, -nu, None)
c2 = self.c[tuple(sl)]
if c2.shape[axis] == 0:
# derivative of order 0 is zero
shp = list(c2.shape)
shp[axis] = 1
c2 = np.zeros(shp, dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
sl = [None]*c2.ndim
sl[axis] = slice(None)
c2 *= factor[tuple(sl)]
self.c = c2
def _antiderivative_inplace(self, nu, axis):
"""
Compute 1D antiderivative along a selected dimension
        May result in a non-contiguous c array.
"""
if nu <= 0:
return self._derivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
perm = list(range(ndim))
perm[0], perm[axis] = perm[axis], perm[0]
perm = perm + list(range(ndim, self.c.ndim))
c = self.c.transpose(perm)
c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
dtype=c.dtype)
c2[:-nu] = c
# divide by the correct rising factorials
factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
perm2 = list(range(c2.ndim))
perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
c2 = c2.transpose(perm2)
c2 = c2.copy()
_ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
self.x[axis], nu-1)
c2 = c2.transpose(perm2)
c2 = c2.transpose(perm)
# Done
self.c = c2
def derivative(self, nu):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the antiderivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
representing the derivative of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals in each dimension are
considered half-open, ``[a, b)``, except for the last interval
which is closed ``[a, b]``.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._derivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def antiderivative(self, nu):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._antiderivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def integrate_1d(self, a, b, axis, extrapolate=None):
r"""
Compute NdPPoly representation for one dimensional definite integral
The result is a piecewise polynomial representing the integral:
.. math::
p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
where the dimension integrated over is specified with the
`axis` parameter.
Parameters
----------
a, b : float
Lower and upper bound for integration.
axis : int
Dimension over which to compute the 1D integrals
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : NdPPoly or array-like
Definite integral of the piecewise polynomial over [a, b].
If the polynomial was 1-dimensional, an array is returned,
otherwise, an NdPPoly object.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
axis = int(axis) % ndim
# reuse 1D integration routines
c = self.c
swap = list(range(c.ndim))
swap.insert(0, swap[axis])
del swap[axis + 1]
swap.insert(1, swap[ndim + axis])
del swap[ndim + axis + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
self.x[axis],
extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
# Construct result
if ndim == 1:
return out.reshape(c.shape[2:])
else:
c = out.reshape(c.shape[2:])
x = self.x[:axis] + self.x[axis+1:]
return self.construct_fast(c, x, extrapolate=extrapolate)
def integrate(self, ranges, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
ranges : ndim-tuple of 2-tuples float
Sequence of lower and upper bounds for each dimension,
``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over
[a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
"""
ndim = len(self.x)
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
if not hasattr(ranges, '__len__') or len(ranges) != ndim:
raise ValueError("Range not a sequence of correct length")
self._ensure_c_contiguous()
# Reuse 1D integration routine
c = self.c
for n, (a, b) in enumerate(ranges):
swap = list(range(c.ndim))
swap.insert(1, swap[ndim - n])
del swap[ndim - n + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
c = out.reshape(c.shape[2:])
return c
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
If any of `points` have a dimension of size 1, linear interpolation will
return an array of `nan` values. Nearest-neighbor interpolation will work
as usual in this case.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x, y, z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Wikipedia, "Trilinear interpolation",
https://en.wikipedia.org/wiki/Trilinear_interpolation
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices,
norm_distances,
out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices,
norm_distances,
out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
        # each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = [np.where(yi <= .5, i, i + 1)
for i, yi in zip(indices, norm_distances)]
return self.values[tuple(idx_res)]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
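# Illustrative sketch, not part of the original module: how the
# ``bounds_error`` and ``fill_value`` options documented in the class
# docstring interact.  With ``bounds_error=False`` an out-of-range query
# returns ``fill_value`` instead of raising; ``fill_value=None`` would
# request extrapolation from the nearest cell instead.
def _regular_grid_fill_value_sketch():
    x = np.linspace(0., 1., 5)
    y = np.linspace(0., 1., 5)
    values = np.add.outer(x, y)                    # f(x, y) = x + y on the grid
    interp = RegularGridInterpolator((x, y), values,
                                     bounds_error=False, fill_value=np.nan)
    inside = interp([[0.37, 0.61]])                # bilinear value of x + y, 0.98
    outside = interp([[2.0, 2.0]])                 # outside the grid -> nan
    return inside, outside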
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
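# Illustrative sketch, not part of the original module: the functional
# ``interpn`` wrapper defined above, comparing the "linear" and "nearest"
# methods on the same small 2-D grid.
def _interpn_methods_sketch():
    x = np.array([0., 1., 2.])
    y = np.array([0., 1., 2.])
    values = np.add.outer(x, y)                           # f(x, y) = x + y sampled exactly
    pt = np.array([[0.5, 0.5]])
    lin = interpn((x, y), values, pt, method="linear")    # exact for a linear f: [1.0]
    # ties at a normalised distance of 0.5 resolve to the lower node in
    # ``_evaluate_nearest`` above, so this returns the value at (0, 0)
    near = interpn((x, y), values, pt, method="nearest")
    return lin, near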
# backward compatibility wrapper
class _ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("_ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
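# Illustrative sketch, not part of the original module: since ``_ppform`` is
# deprecated in favour of ``PPoly``, new code would typically build a
# piecewise polynomial from a FITPACK spline via ``PPoly.from_spline``.  The
# local import keeps the sketch self-contained.
def _ppoly_from_spline_sketch():
    from scipy.interpolate import splrep
    xs = np.linspace(0., 2. * np.pi, 20)
    tck = splrep(xs, np.sin(xs))
    pp = PPoly.from_spline(tck)
    return pp(np.pi / 2.)                 # close to sin(pi/2) == 1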
| bsd-3-clause |
aminert/scikit-learn | sklearn/preprocessing/tests/test_data.py | 113 | 38432 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
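# Illustrative sketch, not part of the original test suite (helper prefixed
# with '_' so it is not collected as a test): for a single sample
# ``[a, b] = [2, 3]`` the degree-2 expansion exercised above is laid out as
# ``[1, a, b, a**2, a*b, b**2]``.
def _polynomial_features_sketch():
    X = np.array([[2., 3.]])
    X_poly = PolynomialFeatures(degree=2).fit_transform(X)
    expected = np.array([[1., 2., 3., 4., 6., 9.]])
    assert_array_almost_equal(X_poly, expected)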
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
    # np.log(1e-5) is taken because its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
    # with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
    X[:, 0] = 1.0 # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
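# Illustrative sketch, not part of the original test suite (helper prefixed
# with '_' so it is not collected as a test): for the default
# ``feature_range=(0, 1)`` the MinMaxScaler mapping checked above reduces to
# the per-feature affine map ``(X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))``.
def _min_max_scaler_formula_sketch():
    X = np.array([[1.], [2.], [4.]])
    scaler = MinMaxScaler(feature_range=(0, 1)).fit(X)
    manual = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
    assert_array_almost_equal(scaler.transform(X), manual)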
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
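# Illustrative sketch, not part of the original test suite (helper prefixed
# with '_' so it is not collected as a test): with default settings
# RobustScaler centers on the per-feature median and scales by the
# interquartile range, which is what the quantile-based expectations above
# encode.  This assumes np.median / np.percentile match the scaler's
# internal quantile rule, as the surrounding tests do.
def _robust_scaler_formula_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    scaler = RobustScaler().fit(X)
    q = np.percentile(X, q=(25, 75), axis=0)
    manual = (X - np.median(X, axis=0)) / (q[1] - q[0])
    assert_array_almost_equal(scaler.transform(X), manual)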
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
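# Illustrative sketch, not part of the original test suite (helper prefixed
# with '_' so it is not collected as a test): the 'l2' norm checked above
# simply divides each sample (row) by its Euclidean norm, so a manual
# row-wise division reproduces Normalizer's output on non-zero rows.
def _normalizer_l2_formula_sketch():
    rng = np.random.RandomState(1)
    X = rng.randn(4, 5)
    manual = X / np.sqrt((X ** 2).sum(axis=1))[:, np.newaxis]
    assert_array_almost_equal(Normalizer(norm='l2').transform(X), manual)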
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
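# Illustrative sketch, not part of the original test suite (helper prefixed
# with '_' so it is not collected as a test): the equivalence asserted above
# corresponds to the classical double-centering formula
# ``K_c = K - 1_n K - K 1_n + 1_n K 1_n`` with ``1_n`` the constant matrix 1/n.
def _kernel_double_centering_sketch():
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    K = np.dot(X, X.T)
    n = K.shape[0]
    one_n = np.ones((n, n)) / n
    manual = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    assert_array_almost_equal(KernelCenterer().fit_transform(K), manual)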
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
MechCoder/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 33 | 17877 | import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics import cluster as cluster_module
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
'neg_mean_squared_error', 'neg_mean_squared_log_error',
'neg_median_absolute_error', 'mean_absolute_error',
'mean_squared_error', 'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'neg_log_loss', 'log_loss']
# All supervised cluster scorers (They behave like classification metric)
CLUSTER_SCORERS = ["adjusted_rand_score",
"homogeneity_score",
"completeness_score",
"v_measure_score",
"mutual_info_score",
"adjusted_mutual_info_score",
"normalized_mutual_info_score",
"fowlkes_mallows_score"]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_clf) for name in CLUSTER_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
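# Illustrative sketch, not part of the original test suite (helper prefixed
# with '_' so it is not collected as a test): the 'neg_log_loss' scorer used
# above follows the same sign convention that ``make_scorer`` applies when a
# loss is wrapped with ``greater_is_better=False`` -- the value is negated so
# model selection can always maximise it.
def _make_scorer_sign_sketch():
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0).fit(X_train, y_train)
    loss_scorer = make_scorer(log_loss, greater_is_better=False,
                              needs_proba=True)
    assert_almost_equal(loss_scorer(clf, X_test, y_test),
                        -log_loss(y_test, clf.predict_proba(X_test)))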
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_supervised_cluster_scorers():
# Test clustering scorers against gold standard labeling.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
for name in CLUSTER_SCORERS:
score1 = get_scorer(name)(km, X_test, y_test)
score2 = getattr(cluster_module, name)(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
@ignore_warnings # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
if scorer_name in MULTILABEL_ONLY_SCORERS:
score = scorer(estimator, X_mm, y_ml_mm)
else:
score = scorer(estimator, X_mm, y_mm)
assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
# Non-regression test for #6147: some score functions would
# return singleton memmap when computed on memmap data instead of scalar
# float values.
for name in SCORERS.keys():
yield check_scorer_memmap, name
def test_deprecated_names():
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
for name in ('mean_absolute_error', 'mean_squared_error',
'median_absolute_error', 'log_loss'):
warning_msg = "Scoring method %s was renamed to" % name
for scorer in (get_scorer(name), SCORERS[name]):
assert_warns_message(DeprecationWarning,
warning_msg,
scorer, clf, X, y)
assert_warns_message(DeprecationWarning,
warning_msg,
cross_val_score, clf, X, y, scoring=name)
def test_scoring_is_not_metric():
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), f1_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), roc_auc_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
Ridge(), r2_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
KMeans(), cluster_module.adjusted_rand_score)
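# A minimal sketch of the supported path (illustrative only, not part of the test
# suite): a metric that takes (y_true, y_pred) must be wrapped with make_scorer
# before it can be used as a `scoring` argument, e.g.
#   macro_f1 = make_scorer(f1_score, average='macro')
#   check_scoring(LogisticRegression(), macro_f1)   # accepted, unlike raw f1_score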
| bsd-3-clause |
iulian787/spack | var/spack/repos/builtin/packages/py-workload-automation/package.py | 4 | 2174 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyWorkloadAutomation(PythonPackage):
"""Workload Automation (WA) is a framework for executing workloads and
collecting measurements on Android and Linux devices."""
homepage = "https://github.com/ARM-software/workload-automation"
url = "https://github.com/ARM-software/workload-automation/archive/v3.2.tar.gz"
version('3.2', sha256='a3db9df6a9e0394231560ebe6ba491a513f6309e096eaed3db6f4cb924c393ea')
version('3.1.4', sha256='217fc33a3739d011a086315ef86b90cf332c16d1b03c9dcd60d58c9fd1f37f98')
version('3.1.3', sha256='152470808cf8dad8a833fd7b2cb7d77cf8aa5d1af404e37fa0a4ff3b07b925b2')
version('3.1.2', sha256='8226a6abc5cbd96e3f1fd6df02891237a06cdddb8b1cc8916f255fcde20d3069')
version('3.1.1', sha256='32a19be92e43439637c68d9146f21bb7a0ae7b8652c11dfc4b4bd66d59329ad4')
version('3.1.0', sha256='f00aeef7a1412144c4139c23b4c48583880ba2147207646d96359f1d295d6ac3')
version('3.0.0', sha256='8564b0c67541e3a212363403ee090dfff5e4df85770959a133c0979445b51c3c')
version('2.7.0', sha256='e9005b9db18e205bf6c4b3e09b15a118abeede73700897427565340dcd589fbb')
version('2.6.0', sha256='b94341fb067592cebe0db69fcf7c00c82f96b4eb7c7210e34b38473869824cce')
depends_on('py-setuptools', type='build')
depends_on('py-python-dateutil', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-pyserial', type=('build', 'run'))
depends_on('py-colorama', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-requests', type=('build', 'run'))
depends_on('py-wrapt', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='^[email protected]:')
depends_on('[email protected]:0.24.2', type=('build', 'run'), when='^python@:3.5.2')
depends_on('py-future', type=('build', 'run'))
depends_on('py-louie', type=('build', 'run'))
depends_on('py-devlib', type=('build', 'run'))
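    # A minimal usage sketch (standard Spack workflow; the version shown is one of
    # those listed above and is illustrative only):
    #   spack install [email protected]
    #   spack load py-workload-automation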
| lgpl-2.1 |
alexsavio/scikit-learn | benchmarks/bench_mnist.py | 38 | 6799 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
LinearRegression-SAG 16.67s 0.06s 0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
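# A minimal invocation sketch (assumes this script is run directly; the flags mirror
# the argparse options defined below and the classifier names are keys of ESTIMATORS):
#   python bench_mnist.py --classifiers ExtraTrees Nystroem-SVM --n-jobs 4 --order C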
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
chengsoonong/acton | acton/database.py | 1 | 72157 | """Wrapper class for databases."""
from abc import ABC, abstractmethod
from inspect import Traceback
import json
import logging
import os.path
import tempfile
from typing import Iterable, List, Sequence
import warnings
import time
from acton.proto.acton_pb2 import Database as DatabasePB
import astropy.io.ascii as io_ascii
import astropy.io.fits as io_fits
import astropy.table
import h5py
import numpy
import pandas
import sklearn.preprocessing
from numpy.random import multivariate_normal
LabelEncoderPB = DatabasePB.LabelEncoder
def product(seq: Iterable[int]):
"""Finds the product of a list of ints.
Arguments
---------
seq
List of ints.
Returns
-------
int
Product.
"""
prod = 1
for i in seq:
prod *= i
return prod
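# A minimal usage sketch for product (values are illustrative only):
#   product([2, 3, 4])  # == 24
#   product([])         # == 1 (empty product)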
def serialise_encoder(
encoder: sklearn.preprocessing.LabelEncoder) -> LabelEncoderPB:
"""Serialises a LabelEncoder as a protobuf.
Parameters
----------
encoder
LabelEncoder.
Returns
-------
LabelEncoderPB
Protobuf representing the LabelEncoder.
"""
proto = LabelEncoderPB()
if not hasattr(encoder, 'classes_'):
return proto
for i, class_label in enumerate(encoder.classes_):
encoding = proto.encoding.add()
encoding.class_label = str(class_label)
encoding.class_int = i
return proto
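# A minimal usage sketch for serialise_encoder (the class labels are illustrative
# assumptions only):
#   encoder = sklearn.preprocessing.LabelEncoder()
#   encoder.fit(['galaxy', 'star', 'star'])
#   proto = serialise_encoder(encoder)
#   # proto.encoding now holds one (class_label, class_int) entry per class.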
class Database(ABC):
"""Base class for database wrappers."""
@abstractmethod
def __enter__(self):
return self
@abstractmethod
def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback):
pass
@abstractmethod
def read_features(self, ids: Sequence[int]) -> numpy.ndarray:
"""Reads feature vectors from the database.
Parameters
----------
ids
Iterable of IDs.
Returns
-------
numpy.ndarray
N x D array of feature vectors.
"""
@abstractmethod
def read_labels(self,
labeller_ids: Sequence[int],
instance_ids: Sequence[int]) -> numpy.ndarray:
"""Reads label vectors from the database.
Parameters
----------
labeller_ids
Iterable of labeller IDs.
instance_ids
Iterable of instance IDs.
Returns
-------
numpy.ndarray
T x N x F array of label vectors.
"""
@abstractmethod
def write_features(self, ids: Sequence[int], features: numpy.ndarray):
"""Writes feature vectors to the database.
Parameters
----------
ids
Iterable of IDs.
features
N x D array of feature vectors. The ith row corresponds to the ith
ID in `ids`.
"""
@abstractmethod
def write_labels(self,
labeller_ids: Sequence[int],
instance_ids: Sequence[int],
labels: numpy.ndarray):
"""Writes label vectors to the database.
Parameters
----------
labeller_ids
Iterable of labeller IDs.
instance_ids
Iterable of instance IDs.
labels
T x N x D array of label vectors. The ith row corresponds to the ith
labeller ID in `labeller_ids` and the jth column corresponds to the
jth instance ID in `instance_ids`.
"""
@abstractmethod
def get_known_instance_ids(self) -> List[int]:
"""Returns a list of known instance IDs.
Returns
-------
List[str]
A list of known instance IDs.
"""
@abstractmethod
def get_known_labeller_ids(self) -> List[int]:
"""Returns a list of known labeller IDs.
Returns
-------
List[str]
A list of known labeller IDs.
"""
@abstractmethod
def to_proto(self) -> DatabasePB:
"""Serialises this database as a protobuf.
Returns
-------
DatabasePB
Protobuf representing this database.
"""
class HDF5Database(Database):
"""Database wrapping an HDF5 file as a context manager.
Attributes
----------
path : str
Path to HDF5 file.
_h5_file : h5py.File
HDF5 file object.
"""
def __init__(self, path: str):
self.path = path
def __enter__(self):
self._open_hdf5()
return self
def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback):
self._h5_file.close()
delattr(self, '_h5_file')
def _assert_open(self):
"""Asserts that the HDF5 file is ready to be read to/written from.
Raises
------
AssertionError
"""
assert hasattr(self, '_h5_file'), ('HDF5 database must be used as a '
'context manager.')
def _open_hdf5(self):
"""Opens the HDF5 file and creates it if it doesn't exist.
Notes
-----
The HDF5 file will be stored in self._h5_file.
"""
try:
self._h5_file = h5py.File(self.path, 'r+')
except OSError:
with h5py.File(self.path, 'w') as h5_file:
self._setup_hdf5(h5_file)
self._h5_file = h5py.File(self.path, 'r+')
class ManagedHDF5Database(HDF5Database):
"""Database using an HDF5 file.
Notes
-----
This database uses an internal schema. For reading files from disk, use
another Database.
Attributes
----------
path : str
Path to HDF5 file.
label_dtype : str
Data type of labels.
feature_dtype : str
Data type of features.
_h5_file : h5py.File
Opened HDF5 file.
_sync_attrs : List[str]
List of instance attributes to sync with the HDF5 file's attributes.
"""
def __init__(self, path: str, label_dtype: str=None,
feature_dtype: str=None):
"""
Parameters
----------
path
Path to HDF5 file.
label_dtype
Data type of labels. If not provided then it will be read from the
database file; if the database file does not exist then the default
type of 'float32' will be used.
feature_dtype
Data type of features. If not provided then it will be read from the
database file; if the database file does not exist then the default
type of 'float32' will be used.
"""
super().__init__(path)
self.label_dtype = label_dtype
self._default_label_dtype = 'float32'
self.feature_dtype = feature_dtype
self._default_feature_dtype = 'float32'
# List of attributes to keep in sync with the HDF5 file.
self._sync_attrs = ['label_dtype', 'feature_dtype']
def to_proto(self) -> DatabasePB:
"""Serialises this database as a protobuf.
Returns
-------
DatabasePB
Protobuf representing this database.
"""
proto = DatabasePB()
proto.path = self.path
proto.class_name = 'ManagedHDF5Database'
db_kwargs = {
'label_dtype': self.label_dtype,
'feature_dtype': self.feature_dtype}
for key, value in db_kwargs.items():
kwarg = proto.kwarg.add()
kwarg.key = key
kwarg.value = json.dumps(value)
# No encoder for a managed DB - assume that labels are encoded already.
# proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder))
return proto
def _open_hdf5(self):
"""Opens the HDF5 file and creates it if it doesn't exist.
Notes
-----
The HDF5 file will be stored in self._h5_file.
"""
super()._open_hdf5()
# Load attrs from HDF5 file if we haven't specified them.
for attr in self._sync_attrs:
if getattr(self, attr) is None:
setattr(self, attr, self._h5_file.attrs[attr])
self._validate_hdf5()
def write_features(self, ids: Sequence[int], features: numpy.ndarray):
"""Writes feature vectors to the database.
Parameters
----------
ids
Iterable of IDs.
features:
N x D array of feature vectors. The ith row corresponds to the ith
ID in `ids`.
Returns
-------
numpy.ndarray
N x D array of feature vectors.
"""
self._assert_open()
# Input validation.
if len(ids) != len(features):
raise ValueError('Must have same number of IDs and features.')
if self._h5_file.attrs['n_features'] == -1:
# This is the first time we've stored features, so make a record of
# the dimensionality.
self._h5_file.attrs['n_features'] = features.shape[1]
elif self._h5_file.attrs['n_features'] != features.shape[1]:
raise ValueError(
'Expected features to have dimensionality {}, got {}'.format(
self._h5_file.attrs['n_features'], features.shape[1]))
# Early termination.
if not ids:
return
# Cast the features to the right type.
if features.dtype != self.feature_dtype:
warnings.warn('Casting features from type {} to type {}.'.format(
features.dtype, self.feature_dtype))
features = features.astype(self.feature_dtype)
# Resize the feature array if we need to store more IDs than before.
max_id = max(ids) + 1
if max_id > self._h5_file['features'].shape[0]:
self._h5_file['features'].resize(
(max_id, self._h5_file.attrs['n_features']))
# Store the feature vectors.
# TODO(MatthewJA): Vectorise this. This could be tricky as HDF5 doesn't
# fully support NumPy's fancy indexing.
for id_, feature in zip(ids, features):
self._h5_file['features'][id_, :] = feature
# Add the IDs to the database.
known_ids = set(self.get_known_instance_ids())
new_ids = [i for i in ids if i not in known_ids]
n_new_ids = len(new_ids)
n_old_ids = self._h5_file['instance_ids'].shape[0]
self._h5_file['instance_ids'].resize((n_old_ids + n_new_ids,))
self._h5_file['instance_ids'][-n_new_ids:] = numpy.array(
new_ids, dtype=int)
def read_features(self, ids: Sequence[int]) -> numpy.ndarray:
"""Reads feature vectors from the database.
Parameters
----------
ids
Iterable of IDs.
Returns
-------
numpy.ndarray
N x D array of feature vectors.
"""
self._assert_open()
if self._h5_file.attrs['n_features'] == -1 and ids:
raise KeyError('No features stored in database.')
# Allocate the features array.
features = numpy.zeros((len(ids), self._h5_file.attrs['n_features']),
dtype=self._h5_file.attrs['feature_dtype'])
# Loop through each ID we want to query and put the associated feature
# into the features array.
features = self._h5_file['features'].value[ids, :]
features = numpy.asarray(
features, dtype=self._h5_file.attrs['feature_dtype'])
return features
def write_labels(self,
labeller_ids: Sequence[int],
instance_ids: Sequence[int],
labels: numpy.ndarray):
"""Writes label vectors to the database.
Parameters
----------
labeller_ids
Iterable of labeller IDs.
instance_ids
Iterable of instance IDs.
labels
T x N x D array of label vectors. The ith row corresponds to the ith
labeller ID in `labeller_ids` and the jth column corresponds to the
jth instance ID in `instance_ids`.
"""
self._assert_open()
# Input validation.
if len(labeller_ids) != labels.shape[0]:
raise ValueError(
'labels array has incorrect number of labellers:'
' expected {}, got {}.'.format(len(labeller_ids),
labels.shape[0]))
if len(instance_ids) != labels.shape[1]:
raise ValueError(
'labels array has incorrect number of instances:'
' expected {}, got {}.'.format(len(instance_ids),
labels.shape[1]))
if self._h5_file.attrs['label_dim'] == -1:
# This is the first time we've stored labels, so make a record of
# the dimensionality.
self._h5_file.attrs['label_dim'] = labels.shape[2]
elif self._h5_file.attrs['label_dim'] != labels.shape[2]:
raise ValueError(
'Expected labels to have dimensionality {}, got {}'.format(
self._h5_file.attrs['label_dim'], labels.shape[2]))
# Early termination.
if not labeller_ids or not instance_ids:
return
# Cast the labels to the right type.
if labels.dtype != self.label_dtype:
warnings.warn('Casting labels from type {} to type {}.'.format(
labels.dtype, self.label_dtype))
labels = labels.astype(self.label_dtype)
# Resize the label array if necessary.
max_labeller_id = max(labeller_ids) + 1
max_instance_id = max(instance_ids) + 1
if (max_labeller_id > self._h5_file['labels'].shape[0] or
max_instance_id > self._h5_file['labels'].shape[1]):
self._h5_file['labels'].resize(
(max_labeller_id, max_instance_id,
self._h5_file.attrs['label_dim']))
# Store the labels.
# TODO(MatthewJA): Vectorise this.
for labeller_idx, labeller_id in enumerate(labeller_ids):
for instance_idx, instance_id in enumerate(instance_ids):
label = labels[labeller_idx, instance_idx]
self._h5_file['labels'][
labeller_id, instance_id, :] = label
logging.debug(
'New label array size: {}'.format(self._h5_file['labels'].shape))
# Add the instance IDs to the database.
known_instance_ids = set(self.get_known_instance_ids())
new_instance_ids = [i for i in instance_ids
if i not in known_instance_ids]
n_new_instance_ids = len(new_instance_ids)
n_old_instance_ids = self._h5_file['instance_ids'].shape[0]
if n_new_instance_ids:
self._h5_file['instance_ids'].resize(
(n_old_instance_ids + n_new_instance_ids,))
self._h5_file['instance_ids'][-n_new_instance_ids:] = numpy.array(
new_instance_ids, dtype=int)
# Add the labeller IDs to the database.
known_labeller_ids = set(self.get_known_labeller_ids())
new_labeller_ids = [i for i in labeller_ids
if i not in known_labeller_ids]
n_new_labeller_ids = len(new_labeller_ids)
n_old_labeller_ids = self._h5_file['labeller_ids'].shape[0]
if n_new_labeller_ids:
self._h5_file['labeller_ids'].resize(
(n_old_labeller_ids + n_new_labeller_ids,))
self._h5_file['labeller_ids'][-n_new_labeller_ids:] = numpy.array(
new_labeller_ids, dtype=int)
def read_labels(self,
labeller_ids: Sequence[int],
instance_ids: Sequence[int]) -> numpy.ndarray:
"""Reads label vectors from the database.
Parameters
----------
labeller_ids
Iterable of labeller IDs.
instance_ids
Iterable of instance IDs.
Returns
-------
numpy.ndarray
T x N x F array of label vectors.
"""
self._assert_open()
if self._h5_file.attrs['label_dim'] == -1 and (
labeller_ids or instance_ids):
raise KeyError('No labels stored in database.')
labels = self._h5_file['labels'].value[labeller_ids][:, instance_ids, :]
labels = numpy.asarray(labels, dtype=self._h5_file.attrs['label_dtype'])
return labels
def get_known_instance_ids(self) -> List[int]:
"""Returns a list of known instance IDs.
Returns
-------
List[str]
A list of known instance IDs.
"""
self._assert_open()
return [id_ for id_ in self._h5_file['instance_ids']]
def get_known_labeller_ids(self) -> List[int]:
"""Returns a list of known labeller IDs.
Returns
-------
List[str]
A list of known labeller IDs.
"""
self._assert_open()
return [id_ for id_ in self._h5_file['labeller_ids']]
def _setup_hdf5(self, h5_file: h5py.File):
"""Sets up an HDF5 file to work as a database.
Parameters
----------
h5_file
HDF5 file to set up. Must be opened in write mode.
"""
if self.label_dtype is None:
self.label_dtype = self._default_label_dtype
if self.feature_dtype is None:
self.feature_dtype = self._default_feature_dtype
h5_file.create_dataset('features', shape=(0, 0),
dtype=self.feature_dtype,
maxshape=(None, None))
h5_file.create_dataset('labels', shape=(0, 0, 0),
dtype=self.label_dtype,
maxshape=(None, None, None))
h5_file.create_dataset('instance_ids', shape=(0,),
dtype=int, maxshape=(None,))
h5_file.create_dataset('labeller_ids', shape=(0,),
dtype=int, maxshape=(None,))
h5_file.attrs['label_dtype'] = self.label_dtype
h5_file.attrs['feature_dtype'] = self.feature_dtype
h5_file.attrs['n_features'] = -1
h5_file.attrs['label_dim'] = -1
def _validate_hdf5(self):
"""Checks that self._h5_file has the correct schema.
Raises
------
ValueError
"""
try:
assert 'features' in self._h5_file
assert 'labels' in self._h5_file
assert 'instance_ids' in self._h5_file
assert 'labeller_ids' in self._h5_file
assert len(self._h5_file['features'].shape) == 2
assert len(self._h5_file['labels'].shape) == 3
assert len(self._h5_file['instance_ids'].shape) == 1
assert len(self._h5_file['labeller_ids'].shape) == 1
except AssertionError:
raise ValueError(
'File {} is not a valid database.'.format(self.path))
for attr in self._sync_attrs:
assert getattr(self, attr) is not None
if self._h5_file.attrs[attr] != getattr(self, attr):
raise ValueError('Incompatible {}: expected {}, got {}'.format(
attr, getattr(self, attr), self._h5_file.attrs[attr]))
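# A minimal usage sketch for ManagedHDF5Database (the path, shapes and dtypes below
# are illustrative assumptions only):
#   with ManagedHDF5Database('/tmp/acton_example.h5', label_dtype='float32',
#                            feature_dtype='float32') as db:
#       db.write_features([0, 1], numpy.random.random((2, 4)))
#       db.write_labels([0], [0, 1], numpy.zeros((1, 2, 1)))
#       features = db.read_features([0, 1])   # 2 x 4 array
#       labels = db.read_labels([0], [0, 1])  # 1 x 2 x 1 array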
class GraphDatabase(HDF5Database):
"""Manage database handling knowledge graph factorization,
Attributes
-----------
path: str
Path to HDF5 file.
"""
def __init__(self, path: str):
"""
Parameters
----------
path
Path to HDF5 file.
"""
self.path = path
def to_proto(self) -> DatabasePB:
"""Serialises this database as a protobuf.
Returns
-------
DatabasePB
Protobuf representing this database.
"""
proto = DatabasePB()
proto.path = self.path
        proto.class_name = 'GraphDatabase'
        # GraphDatabase takes no constructor arguments beyond the path, so there
        # are no kwargs to serialise.
        db_kwargs = {}
for key, value in db_kwargs.items():
kwarg = proto.kwarg.add()
kwarg.key = key
kwarg.value = json.dumps(value)
# No encoder for a managed DB - assume that labels are encoded already.
# proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder))
return proto
def _open_hdf5(self):
"""Opens the HDF5 file and creates it if it doesn't exist.
Notes
-----
The HDF5 file will be stored in self._h5_file.
"""
try:
self._h5_file = h5py.File(self.path, 'r+')
except OSError:
with h5py.File(self.path, 'w') as h5_file:
self._setup_hdf5(h5_file)
self._h5_file = h5py.File(self.path, 'r+')
def _setup_hdf5(self, h5_file: h5py.File):
"""Sets up an HDF5 file to work as a database.
Parameters
----------
h5_file
HDF5 file to set up. Must be opened in write mode.
"""
h5_file.create_dataset('features_E',
shape=(0, 0, 0),
maxshape=(None, None, None))
h5_file.create_dataset('features_R',
shape=(0, 0, 0, 0),
maxshape=(None, None, None, None))
h5_file.create_dataset('labels',
shape=(0, 0, 0),
maxshape=(None, None, None))
h5_file.attrs['n_entities'] = -1
h5_file.attrs['n_relations'] = -1
h5_file.attrs['n_dim'] = -1
h5_file.attrs['n_particles'] = -1
def _validate_hdf5(self):
"""Checks that self._h5_file has the correct schema.
Raises
------
ValueError
"""
try:
assert 'features_E' in self._h5_file
assert 'features_R' in self._h5_file
assert 'labels' in self._h5_file
assert len(self._h5_file['features_E'].shape) == 3
assert len(self._h5_file['features_R'].shape) == 4
assert len(self._h5_file['labels'].shape) == 3
except AssertionError:
raise ValueError(
'File {} is not a valid database.'.format(self.path))
def write_labels(self,
labels: numpy.ndarray):
"""Writes label vectors to the database.
Parameters
----------
labels
K x N x N array of label vectors.
K is the number of relations, N is the number of entities.
"""
self._assert_open()
# Input validation.
if self._h5_file.attrs['n_relations'] == -1:
# This is the first time we've stored labels, so make a record of
# the dimensionality.
self._h5_file.attrs['n_relations'] = labels.shape[0]
elif self._h5_file.attrs['n_relations'] != labels.shape[0]:
raise ValueError(
                'Expected number of relations {}, got {}'.format(
self._h5_file.attrs['n_relations'], labels.shape[0]))
if self._h5_file.attrs['n_entities'] == -1:
# This is the first time we've stored labels, so make a record of
# the dimensionality.
self._h5_file.attrs['n_entities'] = labels.shape[1]
elif self._h5_file.attrs['n_entities'] != labels.shape[1]:
raise ValueError(
                'Expected number of entities {}, got {}'.format(
self._h5_file.attrs['n_entities'], labels.shape[1]))
# Resize the label array if necessary.
if (labels.shape[0] > self._h5_file['labels'].shape[0] or
labels.shape[1] > self._h5_file['labels'].shape[1] or
labels.shape[2] > self._h5_file['labels'].shape[2]):
self._h5_file['labels'].resize(labels.shape)
# Store the labels.
# TODO(MatthewJA): Vectorise this.
for i in range(labels.shape[0]):
self._h5_file['labels'][i, :] = labels[i, :]
logging.debug(
'New label array size: {}'.format(self._h5_file['labels'].shape))
def write_features(self,
features_E: numpy.ndarray,
features_R: numpy.ndarray):
"""Writes feature vectors to the database.
Parameters
----------
features_E:
P x N x D array of entity feature vectors.
P is the number of particles.
N is the number of entities.
D is the number of latent variable dimensions.
features_R:
P x K x D x D array of relation feature vectors.
P is the number of particles.
K is the number of relations.
D is the number of latent variable dimensions.
"""
self._assert_open()
n_particles = features_E.shape[0]
assert features_E.shape[0] == features_R.shape[0]
n_entities = features_E.shape[1]
n_relations = features_R.shape[1]
n_dim = features_E.shape[2]
assert features_E.shape[2] == features_R.shape[2] == features_R.shape[3]
# Input validation.
if self._h5_file.attrs['n_relations'] == -1:
# This is the first time we've stored labels, so make a record of
# the dimensionality.
self._h5_file.attrs['n_relations'] = n_relations
elif self._h5_file.attrs['n_relations'] != n_relations:
raise ValueError(
                'Expected number of relations {}, got {}'.format(
self._h5_file.attrs['n_relations'], n_relations))
if self._h5_file.attrs['n_entities'] == -1:
# This is the first time we've stored labels, so make a record of
# the dimensionality.
self._h5_file.attrs['n_entities'] = n_entities
elif self._h5_file.attrs['n_entities'] != n_entities:
raise ValueError(
                'Expected number of entities {}, got {}'.format(
self._h5_file.attrs['n_entities'], n_entities))
if self._h5_file.attrs['n_dim'] == -1:
# This is the first time we've stored labels, so make a record of
# the dimensionality.
self._h5_file.attrs['n_dim'] = n_dim
elif self._h5_file.attrs['n_dim'] != n_dim:
raise ValueError(
                'Expected number of latent dimensions {}, got {}'.format(
self._h5_file.attrs['n_dim'], n_dim))
if self._h5_file.attrs['n_particles'] == -1:
# This is the first time we've stored labels, so make a record of
# the dimensionality.
self._h5_file.attrs['n_particles'] = n_particles
elif self._h5_file.attrs['n_particles'] != n_particles:
raise ValueError(
                'Expected number of particles {}, got {}'.format(
self._h5_file.attrs['n_particles'], n_particles))
# Resize the feature array if we need to store more IDs than before.
if (features_E.shape[0] > self._h5_file['features_E'].shape[0] or
features_E.shape[1] > self._h5_file['features_E'].shape[1] or
features_E.shape[2] > self._h5_file['features_E'].shape[2]):
self._h5_file['features_E'].resize(features_E.shape)
if (features_R.shape[0] > self._h5_file['features_R'].shape[0] or
features_R.shape[1] > self._h5_file['features_R'].shape[1] or
features_R.shape[2] > self._h5_file['features_R'].shape[2]):
self._h5_file['features_R'].resize(features_R.shape)
# Store the feature vectors.
# TODO(MatthewJA): Vectorise this. This could be tricky as HDF5 doesn't
# fully support NumPy's fancy indexing.
for id_, feature in enumerate(features_E):
self._h5_file['features_E'][id_, :] = feature
for id_, feature in enumerate(features_R):
self._h5_file['features_R'][id_, :, :] = feature
logging.debug(
'New feature E array size: {}'.format(
self._h5_file['features_E'].shape))
logging.debug(
'New feature R array size: {}'.format(
self._h5_file['features_R'].shape))
def read_labels(self,
instance_ids: Sequence[tuple]) -> numpy.ndarray:
"""Reads label vectors from the database.
Parameters
----------
instance_ids
sequence of ids to be read labels
empty list indicates reading all labels in once
Returns
-------
numpy.ndarray
array of label vectors.
"""
self._assert_open()
n_entities = self._h5_file.attrs['n_entities']
n_relations = self._h5_file.attrs['n_relations']
if (n_entities == -1 or n_relations == -1):
raise KeyError('No labels stored in database.')
if len(instance_ids) == 0:
return numpy.asarray(self._h5_file['labels'].value)
else:
labels = []
for tuple_ in instance_ids:
r_k, e_i, e_j = tuple_
labels.append(self._h5_file['labels'].value[r_k, e_i, e_j])
return numpy.asarray(labels)
def read_features(self) -> numpy.ndarray:
"""Reads feature vectors from the database.
Parameters
----------
Returns
-------
E
numpy.ndarray
P x N x D array of feature vectors.
R
list
each element is numpy.ndarray
P x K x D x D array of feature vectors.
"""
self._assert_open()
if self._h5_file.attrs['n_particles'] == -1:
raise KeyError('No features stored in database.')
# Allocate the features array.
        features_E = numpy.zeros((self._h5_file.attrs['n_particles'],
                                  self._h5_file.attrs['n_entities'],
                                  self._h5_file.attrs['n_dim']))
features_R = numpy.zeros((self._h5_file.attrs['n_particles'],
self._h5_file.attrs['n_relations'],
self._h5_file.attrs['n_dim'],
self._h5_file.attrs['n_dim']))
# Loop through each ID we want to query and put the associated feature
# into the features array.
features_E = self._h5_file['features_E'].value
features_R = self._h5_file['features_R'].value
features_E = numpy.asarray(features_E)
features_R = numpy.asarray(features_R)
return features_E, features_R
def get_known_instance_ids(self) -> List[int]:
"""Returns a list of known instance IDs.
Returns
-------
List[str]
A list of known instance IDs.
"""
self._assert_open()
return [id_ for id_ in self._h5_file['instance_ids']]
def get_known_labeller_ids(self) -> List[int]:
"""Returns a list of known labeller IDs.
Returns
-------
List[str]
A list of known labeller IDs.
"""
self._assert_open()
return [id_ for id_ in self._h5_file['labeller_ids']]
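# A minimal usage sketch for GraphDatabase (array shapes are illustrative only;
# P particles, N entities, K relations, D latent dimensions):
#   with GraphDatabase('/tmp/acton_graph.h5') as db:
#       db.write_features(E, R)   # E: P x N x D, R: P x K x D x D
#       db.write_labels(X)        # X: K x N x N
#       E, R = db.read_features()
#       X = db.read_labels([])    # an empty list reads the full label tensor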
class HDF5Reader(HDF5Database):
"""Reads HDF5 databases.
Attributes
----------
feature_cols : List[str]
List of feature datasets.
label_col : str
Name of label dataset.
n_features : int
Number of features.
n_instances : int
Number of instances.
n_labels : int
Number of labels per instance.
path : str
Path to HDF5 file.
encode_labels : bool
Whether to encode labels as integers.
label_encoder : sklearn.preprocessing.LabelEncoder
Encodes labels as integers.
_h5_file : h5py.File
HDF5 file object.
_is_multidimensional : bool
Whether the features are in a multidimensional dataset.
"""
def __init__(self, path: str, feature_cols: List[str], label_col: str,
encode_labels: bool=True,
label_encoder: sklearn.preprocessing.LabelEncoder=None):
"""
Parameters
----------
path
Path to HDF5 file.
feature_cols
List of feature datasets. If only one feature dataset is specified,
this dataset is allowed to be a multidimensional dataset and contain
multiple features.
label_col
Name of label dataset.
encode_labels
Whether to encode labels as integers.
label_encoder
Encodes labels as integers. If not specified, the label column will
be read and a label encoding generated.
"""
super().__init__(path)
if not feature_cols:
raise ValueError('Must specify feature columns for HDF5.')
self.feature_cols = feature_cols
self.label_col = label_col
self.encode_labels = encode_labels
self.label_encoder = label_encoder
if self.label_encoder and not self.encode_labels:
raise ValueError('label_encoder specified but encode_labels is '
'False')
if self.label_encoder is None:
self.label_encoder = sklearn.preprocessing.LabelEncoder()
with h5py.File(self.path, 'r') as data:
is_multidimensional = any(len(data[f_col].shape) > 1 or
not product(data[f_col].shape[1:]) == 1
for f_col in feature_cols)
if is_multidimensional and len(feature_cols) != 1:
raise ValueError(
'Feature arrays and feature columns cannot be mixed. '
'To read in features from a multidimensional dataset, '
'only specify one feature column name.')
self._is_multidimensional = is_multidimensional
self.n_instances = data[label_col].shape[0]
if len(data[label_col].shape) == 1:
self.n_labels = 1
else:
assert len(data[label_col].shape) == 2
self.n_labels = data[label_col].shape[1]
if is_multidimensional:
self.n_features = data[feature_cols[0]].shape[1]
else:
self.n_features = len(feature_cols)
def to_proto(self) -> DatabasePB:
"""Serialises this database as a protobuf.
Returns
-------
DatabasePB
Protobuf representing this database.
"""
proto = DatabasePB()
proto.path = self.path
proto.class_name = 'HDF5Reader'
db_kwargs = {
'feature_cols': self.feature_cols,
'label_col': self.label_col,
'encode_labels': self.encode_labels}
for key, value in db_kwargs.items():
kwarg = proto.kwarg.add()
kwarg.key = key
kwarg.value = json.dumps(value)
proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder))
return proto
def read_features(self, ids: Sequence[int]) -> numpy.ndarray:
"""Reads feature vectors from the database.
Parameters
----------
ids
Iterable of IDs.
Returns
-------
numpy.ndarray
N x D array of feature vectors.
"""
# TODO(MatthewJA): Optimise this.
self._assert_open()
# For each ID, get the corresponding features.
if self._is_multidimensional:
# If there are duplicates in ids, then this will crash with an
# OSError! (and a very cryptic error message...) To get around this,
# we'll first get all the unique IDs.
unique_ids = []
unique_ids_set = set() # For lookups.
id_to_index = {} # For reconstructing the features.
for id_ in ids:
if id_ not in unique_ids_set:
unique_ids.append(id_)
unique_ids_set.add(id_)
id_to_index[id_] = len(unique_ids) - 1
# Then index with just the unique IDs.
features_ = self._h5_file[self.feature_cols[0]][unique_ids]
# Finally, reconstruct the features array.
features = numpy.zeros((len(ids), features_.shape[1]))
for index, id_ in enumerate(ids):
index_ = id_to_index[id_]
features[index, :] = features_[index_, :]
return features
else:
# Allocate output array.
features = numpy.zeros((len(ids), len(self.feature_cols)))
            # Read each feature column (one 1-D dataset per feature name).
            for feature_idx, feature_name in enumerate(self.feature_cols):
                column = self._h5_file[feature_name][ids]
                features[:, feature_idx] = numpy.asarray(column).reshape(-1)
            return numpy.nan_to_num(features)
def read_labels(self,
labeller_ids: Sequence[int],
instance_ids: Sequence[int]) -> numpy.ndarray:
"""Reads label vectors from the database.
Parameters
----------
labeller_ids
Iterable of labeller IDs.
instance_ids
Iterable of instance IDs.
Returns
-------
numpy.ndarray
T x N x F array of label vectors.
"""
self._assert_open()
if len(labeller_ids) > 1:
raise NotImplementedError('Multiple labellers not yet supported.')
# TODO(MatthewJA): Optimise this.
# For each ID, get the corresponding labels.
# If there are duplicates in ids, then this will crash with an
# OSError! (and a very cryptic error message...) To get around this,
# we'll first get all the unique IDs.
unique_ids = []
unique_ids_set = set() # For lookups.
id_to_index = {} # For reconstructing the labels.
for id_ in instance_ids:
if id_ not in unique_ids_set:
unique_ids.append(id_)
unique_ids_set.add(id_)
id_to_index[id_] = len(unique_ids) - 1
# Then index with just the unique IDs.
labels_ = self._h5_file[self.label_col][unique_ids].reshape(
(1, len(unique_ids), -1))
# Finally, reconstruct the labels array.
labels = numpy.zeros(
(1, len(instance_ids), labels_.shape[2]),
dtype=labels_.dtype)
for index, id_ in enumerate(instance_ids):
index_ = id_to_index[id_]
labels[0, index, :] = labels_[0, index_, :]
if labels.shape[2] != 1:
raise NotImplementedError('Multidimensional labels not currently '
'supported.')
# Encode labels.
if self.encode_labels:
labels = numpy.apply_along_axis(
self.label_encoder.fit_transform,
axis=1,
arr=labels.reshape(labels.shape[:2])
).reshape(labels.shape)
return labels
def write_features(self, ids: Sequence[int], features: numpy.ndarray):
raise PermissionError('Cannot write to read-only database.')
def write_labels(self,
labeller_ids: Sequence[int],
instance_ids: Sequence[int],
labels: numpy.ndarray):
raise PermissionError('Cannot write to read-only database.')
def get_known_instance_ids(self) -> List[int]:
"""Returns a list of known instance IDs.
Returns
-------
List[str]
A list of known instance IDs.
"""
self._assert_open()
return [i for i in range(self.n_instances)]
def get_known_labeller_ids(self) -> List[int]:
"""Returns a list of known labeller IDs.
Returns
-------
List[str]
A list of known labeller IDs.
"""
raise NotImplementedError()
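# A minimal usage sketch for HDF5Reader (the file and dataset names are illustrative
# assumptions only):
#   with HDF5Reader('data.h5', feature_cols=['features'], label_col='labels') as reader:
#       X = reader.read_features([0, 1, 2])     # 3 x D feature array
#       y = reader.read_labels([0], [0, 1, 2])  # 1 x 3 x 1 label array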
class ASCIIReader(Database):
"""Reads ASCII databases.
Attributes
----------
feature_cols : List[str]
List of feature columns.
label_col : str
Name of label column.
max_id_length : int
Maximum length of IDs.
n_features : int
Number of features.
n_instances : int
Number of instances.
n_labels : int
Number of labels per instance.
path : str
Path to ASCII file.
encode_labels : bool
Whether to encode labels as integers.
label_encoder : sklearn.preprocessing.LabelEncoder
Encodes labels as integers.
_db : Database
Underlying ManagedHDF5Database.
_db_filepath : str
Path of underlying HDF5 database.
_tempdir : str
Temporary directory where the underlying HDF5 database is stored.
"""
def __init__(self, path: str, feature_cols: List[str], label_col: str,
encode_labels: bool=True,
label_encoder: sklearn.preprocessing.LabelEncoder=None):
"""
Parameters
----------
path
Path to ASCII file.
feature_cols
List of feature columns.
label_col
Name of label column.
encode_labels
Whether to encode labels as integers.
label_encoder
Encodes labels as integers. If not specified, the label column will
be read and a label encoding generated.
"""
self.path = path
self.feature_cols = feature_cols
self.label_col = label_col
self.encode_labels = encode_labels
self.label_encoder = label_encoder
if self.label_encoder and not self.encode_labels:
raise ValueError('label_encoder specified but encode_labels is '
'False')
if self.label_encoder is None:
self.label_encoder = sklearn.preprocessing.LabelEncoder()
def to_proto(self) -> DatabasePB:
"""Serialises this database as a protobuf.
Returns
-------
DatabasePB
Protobuf representing this database.
"""
proto = DatabasePB()
proto.path = self.path
proto.class_name = 'ASCIIReader'
db_kwargs = {
'feature_cols': self.feature_cols,
'label_col': self.label_col,
'encode_labels': self.encode_labels}
for key, value in db_kwargs.items():
kwarg = proto.kwarg.add()
kwarg.key = key
kwarg.value = json.dumps(value)
proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder))
return proto
def _db_from_ascii(self,
db: Database,
data: astropy.table.Table,
feature_cols: Sequence[str],
label_col: str,
ids: Sequence[int]):
"""Reads an ASCII table into a database.
Notes
-----
The entire file is copied into memory.
Arguments
---------
db
Database.
data
ASCII table.
feature_cols
List of column names of the features. If empty, all non-label and
non-ID columns will be used.
label_col
Column name of the labels.
ids
List of instance IDs.
"""
# Read in features.
columns = data.keys()
if not feature_cols:
# If there are no features given, use all columns.
feature_cols = [c for c in columns if c != label_col]
# This converts the features from a table to an array.
features = data[feature_cols].as_array()
features = features.view(numpy.float64).reshape(features.shape + (-1,))
# Read in labels.
labels = numpy.array(
data[label_col]).reshape((1, -1, 1))
# We want to support multiple labellers in the future, but currently
# don't. So every labeller is the same, ID = 0.
labeller_ids = [0]
# Encode labels.
if self.encode_labels:
labels = numpy.apply_along_axis(
self.label_encoder.fit_transform,
axis=1,
arr=labels.reshape(labels.shape[:2])
).reshape(labels.shape)
# Write to database.
db.write_features(ids, features)
db.write_labels(labeller_ids, ids, labels)
def __enter__(self):
self._tempdir = tempfile.TemporaryDirectory(prefix='acton')
# Read the whole file into a DB.
self._db_filepath = os.path.join(self._tempdir.name, 'db.h5')
data = io_ascii.read(self.path)
ids = list(range(len(data[self.label_col])))
max_label_len = max(len(str(i)) for i in data[self.label_col])
label_dtype = '<S{}'.format(max_label_len)
self._db = ManagedHDF5Database(
self._db_filepath,
label_dtype=label_dtype,
feature_dtype='float64')
self._db.__enter__()
try:
# We want to handle the encoding ourselves.
self._db_from_ascii(self._db, data, self.feature_cols,
self.label_col, ids, encode_labels=False)
except TypeError:
# Encoding isn't supported in the underlying database.
self._db_from_ascii(self._db, data, self.feature_cols,
self.label_col, ids)
return self
def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback):
self._db.__exit__(exc_type, exc_val, exc_tb)
self._tempdir.cleanup()
delattr(self, '_db')
def read_features(self, ids: Sequence[int]) -> numpy.ndarray:
"""Reads feature vectors from the database.
Parameters
----------
ids
Iterable of IDs.
Returns
-------
numpy.ndarray
N x D array of feature vectors.
"""
return self._db.read_features(ids)
def read_labels(self,
labeller_ids: Sequence[int],
instance_ids: Sequence[int]) -> numpy.ndarray:
"""Reads label vectors from the database.
Parameters
----------
labeller_ids
Iterable of labeller IDs.
instance_ids
Iterable of instance IDs.
Returns
-------
numpy.ndarray
T x N x F array of label vectors.
"""
# N.B. Labels are encoded in _db_from_ascii.
return self._db.read_labels(labeller_ids, instance_ids)
def write_features(self, ids: Sequence[int], features: numpy.ndarray):
raise NotImplementedError('Cannot write to read-only database.')
def write_labels(self,
labeller_ids: Sequence[int],
instance_ids: Sequence[int],
labels: numpy.ndarray):
raise NotImplementedError('Cannot write to read-only database.')
def get_known_instance_ids(self) -> List[int]:
"""Returns a list of known instance IDs.
Returns
-------
List[str]
A list of known instance IDs.
"""
return self._db.get_known_instance_ids()
def get_known_labeller_ids(self) -> List[int]:
"""Returns a list of known labeller IDs.
Returns
-------
List[str]
A list of known labeller IDs.
"""
return self._db.get_known_labeller_ids()
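# A minimal usage sketch for ASCIIReader (the file path and column names are
# illustrative assumptions only):
#   with ASCIIReader('catalogue.dat', feature_cols=['mag_g', 'mag_r'],
#                    label_col='class') as reader:
#       X = reader.read_features([0, 1])
#       y = reader.read_labels([0], [0, 1])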
class GraphReader(Database):
"""Reads ASCII databases for graph based structure
Input file:
List of known facts,
formatted as relation_id \tab entity_id1 \tab entity_id2,
means entity_id1 has relation_id relation with entity_id2,
Both entity-id and relation-id start from 0.
Output labels:
K x N x N ndarrays,
where K is the number of relations,
N is the number of entities.
0 represents invalid facts, 1 represents valid facts.
Output features:
E is N x D latent features of the entities.
R is K x D x D latent features of the relations.
        Features are initially sampled at random or via Gibbs sampling,
        and are updated sequentially as labels are acquired.
Attributes
----------
path : str
Path to ASCII file.
_db : Database
Underlying ManagedHDF5Database.
_db_filepath : str
Path of underlying HDF5 database.
_tempdir : str
Temporary directory where the underlying HDF5 database is stored.
n_dim
Number of latent features (size of latent dimension).
n_particles:
Number of particles for Thompson sampling.
gibbs_init
Indicates how to sample features (gibbs/random).
var_r
variance of prior of R
var_e
variance of prior of E
var_x
variance of X
obs_mask
Mask tensor of observed triples.
given_r
whether there is any R given for initialization
"""
def __init__(self, path: str, n_dim: int, n_particles: int = 5,
gibbs_init: bool = True, var_r: int = 1, var_e: int = 1,
var_x: float = 0.01, obs_mask: numpy.ndarray= None,
given_r: numpy.ndarray = None):
"""
Parameters
----------
path
Path to ASCII file.
n_dim
Number of latent features (size of latent dimension).
n_particles:
Number of particles for Thompson sampling.
gibbs_init
Indicates how to sample features (gibbs/random).
var_r
variance of prior of R
var_e
variance of prior of E
var_x
variance of X
obs_mask
Mask tensor of observed triples.
given_r
Given features R if any
"""
self.path = path
self.n_dim = n_dim
self.n_particles = n_particles
self.gibbs_init = gibbs_init
self.var_r = var_r
self.var_e = var_e
self.var_x = var_x
self.obs_mask = obs_mask
self.given_r = given_r
def to_proto(self) -> DatabasePB:
"""Serialises this database as a protobuf.
Returns
-------
DatabasePB
Protobuf representing this database.
"""
proto = DatabasePB()
proto.path = self.path
proto.class_name = 'LabelOnlyASCIIReader'
db_kwargs = {
'n_dim': self.n_dim,
'n_particles': self.n_particles, }
for key, value in db_kwargs.items():
kwarg = proto.kwarg.add()
kwarg.key = key
kwarg.value = json.dumps(value)
return proto
def _db_from_ascii(self,
db: Database,
data: astropy.table.Table,
):
"""Reads an ASCII table into a database.
Notes
-----
The entire file is copied into memory.
Arguments
---------
db
Database.
data
ASCII table.
"""
# triples: relation_id entity_id1 entity_id2
# e.g. (0,2,4) represents entity 2 and 4 have relation 0
triples = data.as_array()
triples = triples.view(numpy.int).reshape((triples.shape[0], 3))
self.n_relations = max(triples[:, 0]) + 1
self.n_entities = max(triples[:, 1]) + 1
assert self.n_entities == max(triples[:, -1]) + 1
# only support one labeller
# construct label tensor X = {0,1}^{K x N x N}
X = numpy.zeros((self.n_relations, self.n_entities, self.n_entities))
for i in triples:
X[i[0], i[1], i[2]] = 1
        # Initialize features E, R
self.E = list()
self.R = list()
self.RE = numpy.zeros([self.n_relations, self.n_entities, self.n_dim])
self.RTE = numpy.zeros([self.n_relations, self.n_entities, self.n_dim])
if isinstance(self.obs_mask, type(None)):
self.obs_mask = numpy.zeros_like(X)
else:
logging.info(
"Initial Total, Positive, Negative Obs : %d / %d / %d",
numpy.sum(self.obs_mask),
numpy.sum(X[self.obs_mask == 1]),
numpy.sum(self.obs_mask) - numpy.sum(X[self.obs_mask == 1]))
cur_obs = numpy.zeros_like(X)
for k in range(self.n_relations):
cur_obs[k][self.obs_mask[k] == 1] = X[k][self.obs_mask[k] == 1]
self.obs_sum = numpy.sum(numpy.sum(self.obs_mask, 1), 1)
self.valid_relations = numpy.nonzero(numpy.sum(numpy.sum(X, 1), 1))[0]
self.features = numpy.zeros(
[2 * self.n_entities * self.n_relations, self.n_dim])
self.xi = numpy.zeros([2 * self.n_entities * self.n_relations])
# cur_obs[cur_obs.nonzero()] = 1
if self.gibbs_init and numpy.sum(self.obs_sum) != 0:
# initialize latent variables with gibbs sampling
E = numpy.random.random([self.n_entities, self.n_dim])
R = numpy.random.random([self.n_relations, self.n_dim, self.n_dim])
for gi in range(20):
tic = time.time()
if isinstance(self.given_r, type(None)):
self._sample_relations(
cur_obs, self.obs_mask, E, R, self.var_r)
self._sample_entities(
cur_obs, self.obs_mask, E, R, self.var_e)
else:
self._sample_entities(
cur_obs, self.obs_mask, E, R, self.var_e)
logging.info("Gibbs Init %d: %f", gi, time.time() - tic)
for p in range(self.n_particles):
self.E.append(E.copy())
self.R.append(R.copy())
else:
# random initialization
for p in range(self.n_particles):
self.E.append(numpy.random.random(
[self.n_entities, self.n_dim]))
self.R.append(numpy.random.random(
[self.n_relations, self.n_dim, self.n_dim]))
self.E = numpy.asarray(self.E)
self.R = numpy.asarray(self.R)
# Write to database.
db.write_features(self.E, self.R)
db.write_labels(X)
def __enter__(self):
self._tempdir = tempfile.TemporaryDirectory(prefix='acton')
# Read the whole file into a DB.
self._db_filepath = os.path.join(self._tempdir.name, 'db.h5')
data = io_ascii.read(self.path)
self._db = GraphDatabase(self._db_filepath)
self._db.__enter__()
self._db_from_ascii(self._db, data)
return self
def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback):
self._db.__exit__(exc_type, exc_val, exc_tb)
self._tempdir.cleanup()
delattr(self, '_db')
def read_features(self) -> numpy.ndarray:
"""Reads feature vectors from the database.
Parameters
----------
Returns
-------
E
numpy.ndarray
P x N x D array of feature vectors.
R
list
each element is numpy.ndarray
P x K x D x D array of feature vectors.
N x D array of feature vectors.
"""
return self._db.read_features()
def read_labels(self,
instance_ids: Sequence[tuple]) -> numpy.ndarray:
"""Reads label vectors from the database.
Parameters
----------
Returns
-------
numpy.ndarray
array of label vectors.
"""
# N.B. Labels are encoded in _db_from_ascii.
return self._db.read_labels(instance_ids)
def write_features(self, ids: Sequence[int], features: numpy.ndarray):
raise NotImplementedError('Cannot write to read-only database.')
def write_labels(self,
labeller_ids: Sequence[int],
instance_ids: Sequence[int],
labels: numpy.ndarray):
raise NotImplementedError('Cannot write to read-only database.')
def _sample_entities(self, X, mask, E, R, var_e, sample_idx=None):
RE = self.RE
RTE = self.RTE
for k in range(self.n_relations):
RE[k] = numpy.dot(R[k], E.T).T
RTE[k] = numpy.dot(R[k].T, E.T).T
if isinstance(sample_idx, type(None)):
sample_idx = range(self.n_entities)
for i in sample_idx:
self._sample_entity(X, mask, E, R, i, var_e, RE, RTE)
for k in range(self.n_relations):
RE[k][i] = numpy.dot(R[k], E[i])
RTE[k][i] = numpy.dot(R[k].T, E[i])
def _sample_entity(self, X, mask, E, R, i, var_e, RE=None, RTE=None):
nz_r = mask[:, i, :].nonzero()
nz_c = mask[:, :, i].nonzero()
nnz_r = nz_r[0].size
nnz_c = nz_c[0].size
nnz_all = nnz_r + nnz_c
self.features[:nnz_r] = RE[nz_r]
self.features[nnz_r:nnz_all] = RTE[nz_c]
self.xi[:nnz_r] = X[:, i, :][nz_r]
self.xi[nnz_r:nnz_all] = X[:, :, i][nz_c]
_xi = self.xi[:nnz_all] * self.features[:nnz_all].T
xi = numpy.sum(_xi, 1) / self.var_x
_lambda = numpy.identity(self.n_dim) / var_e
_lambda += numpy.dot(
self.features[:nnz_all].T,
self.features[:nnz_all]) / self.var_x
inv_lambda = numpy.linalg.inv(_lambda)
mu = numpy.dot(inv_lambda, xi)
E[i] = multivariate_normal(mu, inv_lambda)
numpy.mean(numpy.diag(inv_lambda))
# logging.debug('Mean variance E, %d, %f', i, mean_var)
def _sample_relations(self, X, mask, E, R, var_r):
EXE = numpy.kron(E, E)
for k in self.valid_relations:
if self.obs_sum[k] != 0:
self._sample_relation(X, mask, E, R, k, EXE, var_r)
else:
R[k] = numpy.random.normal(
0, var_r, size=[self.n_dim, self.n_dim])
def _sample_relation(self, X, mask, E, R, k, EXE, var_r):
_lambda = numpy.identity(self.n_dim ** 2) / var_r
xi = numpy.zeros(self.n_dim ** 2)
kron = EXE[mask[k].flatten() == 1]
if kron.shape[0] != 0:
_lambda += numpy.dot(kron.T, kron)
xi += numpy.sum(X[k, mask[k] == 1].flatten() * kron.T, 1)
_lambda /= self.var_x
# mu = numpy.linalg.solve(_lambda, xi) / self.var_x
inv_lambda = numpy.linalg.inv(_lambda)
mu = numpy.dot(inv_lambda, xi) / self.var_x
# R[k] = normal(mu, _lambda).reshape([self.n_dim, self.n_dim])
R[k] = multivariate_normal(
mu, inv_lambda).reshape([self.n_dim, self.n_dim])
numpy.mean(numpy.diag(inv_lambda))
# logging.info('Mean variance R, %d, %f', k, mean_var)
def get_known_instance_ids(self) -> List[int]:
"""Returns a list of known instance IDs.
Returns
-------
List[str]
A list of known instance IDs.
"""
return self._db.get_known_instance_ids()
def get_known_labeller_ids(self) -> List[int]:
"""Returns a list of known labeller IDs.
Returns
-------
List[str]
A list of known labeller IDs.
"""
return self._db.get_known_labeller_ids()
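# A minimal usage sketch for GraphReader (the file path and dimensions are illustrative
# assumptions only; the file holds one "relation_id entity_id1 entity_id2" triple per line):
#   with GraphReader('triples.tsv', n_dim=5, n_particles=5) as reader:
#       E, R = reader.read_features()   # P x N x D entity and P x K x D x D relation particles
#       X = reader.read_labels([])      # full K x N x N label tensor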
class PandasReader(Database):
"""Reads HDF5 databases.
Attributes
----------
feature_cols : List[str]
List of feature datasets.
label_col : str
Name of label dataset.
n_features : int
Number of features.
n_instances : int
Number of instances.
n_labels : int
Number of labels per instance.
path : str
Path to HDF5 file.
encode_labels : bool
Whether to encode labels as integers.
label_encoder : sklearn.preprocessing.LabelEncoder
Encodes labels as integers.
_df : pandas.DataFrame
Pandas dataframe.
"""
def __init__(self, path: str, feature_cols: List[str], label_col: str,
key: str, encode_labels: bool=True,
label_encoder: sklearn.preprocessing.LabelEncoder=None):
"""
Parameters
----------
path
Path to HDF5 file.
feature_cols
List of feature columns. If none are specified, then all non-label,
non-ID columns will be used.
label_col
Name of label column.
key
Pandas key.
encode_labels
Whether to encode labels as integers.
label_encoder
Encodes labels as integers. If not specified, the label column will
be read and a label encoding generated.
"""
self.path = path
self.feature_cols = feature_cols
self.label_col = label_col
self.key = key
self._df = pandas.read_hdf(self.path, self.key)
self.encode_labels = encode_labels
self.label_encoder = label_encoder
if self.label_encoder and not self.encode_labels:
raise ValueError('label_encoder specified but encode_labels is '
'False')
if self.label_encoder is None:
self.label_encoder = sklearn.preprocessing.LabelEncoder()
if not self.feature_cols:
self.feature_cols = [k for k in self._df.keys()
if k != self.label_col]
self.n_instances = len(self._df[self.label_col])
self.n_features = len(self.feature_cols)
def to_proto(self) -> DatabasePB:
"""Serialises this database as a protobuf.
Returns
-------
DatabasePB
Protobuf representing this database.
"""
proto = DatabasePB()
proto.path = self.path
proto.class_name = 'PandasReader'
db_kwargs = {
'feature_cols': self.feature_cols,
'label_col': self.label_col,
'key': self.key,
'encode_labels': self.encode_labels}
for key, value in db_kwargs.items():
kwarg = proto.kwarg.add()
kwarg.key = key
kwarg.value = json.dumps(value)
proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder))
return proto
def __enter__(self):
return self
def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback):
delattr(self, '_df')
def read_features(self, ids: Sequence[int]) -> numpy.ndarray:
"""Reads feature vectors from the database.
Parameters
----------
ids
Iterable of IDs.
Returns
-------
numpy.ndarray
N x D array of feature vectors.
"""
# TODO(MatthewJA): Optimise this.
# Allocate output features array.
features = numpy.zeros((len(ids), self.n_features))
# For each ID, get the corresponding features.
for out_index, id_ in enumerate(ids):
sel = self._df.iloc[id_]
for feature_index, feature in enumerate(self.feature_cols):
features[out_index, feature_index] = sel[feature]
return features
def read_labels(self,
labeller_ids: Sequence[int],
instance_ids: Sequence[int]) -> numpy.ndarray:
"""Reads label vectors from the database.
Parameters
----------
labeller_ids
Iterable of labeller IDs.
instance_ids
Iterable of instance IDs.
Returns
-------
numpy.ndarray
T x N x 1 array of label vectors.
"""
# Draw a label to get the dtype.
dtype = type(self._df.iloc[0][self.label_col])
# Allocate output labels array.
labels = numpy.zeros(
(len(labeller_ids), len(instance_ids), 1),
dtype=dtype)
if len(labeller_ids) > 1:
raise NotImplementedError('Multiple labellers not yet supported.')
# For each ID, get the corresponding labels.
for out_index, id_ in enumerate(instance_ids):
sel = self._df.iloc[int(id_)]
labels[0, out_index, 0] = sel[self.label_col]
if labels.shape[2] != 1:
raise NotImplementedError('Multidimensional labels not currently '
'supported.')
# Encode labels.
if self.encode_labels:
labels = numpy.apply_along_axis(
self.label_encoder.fit_transform,
axis=1,
arr=labels.reshape(labels.shape[:2])
).reshape(labels.shape)
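# Note that fit_transform refits label_encoder on the labels read in this
# call, so the integer codes reflect the classes present in `labels` rather
# than a fixed global mapping.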
return labels
def write_features(self, ids: Sequence[int], features: numpy.ndarray):
raise PermissionError('Cannot write to read-only database.')
def write_labels(self,
labeller_ids: Sequence[int],
instance_ids: Sequence[int],
labels: numpy.ndarray):
raise PermissionError('Cannot write to read-only database.')
def get_known_instance_ids(self) -> List[int]:
"""Returns a list of known instance IDs.
Returns
-------
List[int]
A list of known instance IDs.
"""
return list(range(self.n_instances))
def get_known_labeller_ids(self) -> List[int]:
"""Returns a list of known labeller IDs.
Returns
-------
List[int]
A list of known labeller IDs.
"""
raise NotImplementedError()
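# A minimal PandasReader usage sketch. The file name, key and column names
# below are hypothetical placeholders, not part of this module:
#
#     with PandasReader('data.h5', feature_cols=['f0', 'f1'],
#                       label_col='label', key='table') as reader:
#         features = reader.read_features([0, 1, 2])   # shape (3, n_features)
#         labels = reader.read_labels([0], [0, 1, 2])  # shape (1, 3, 1)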
class FITSReader(Database):
"""Reads FITS databases.
Attributes
----------
hdu_index : int
Index of HDU in the FITS file.
feature_cols : List[str]
List of feature columns.
label_col : str
Name of label column.
n_features : int
Number of features.
n_instances : int
Number of instances.
n_labels : int
Number of labels per instance.
path : str
Path to FITS file.
encode_labels : bool
Whether to encode labels as integers.
label_encoder : sklearn.preprocessing.LabelEncoder
Encodes labels as integers.
_hdulist : astropy.io.fits.HDUList
FITS HDUList.
"""
def __init__(self, path: str, feature_cols: List[str], label_col: str,
hdu_index: int=1, encode_labels: bool=True,
label_encoder: sklearn.preprocessing.LabelEncoder=None):
"""
Parameters
----------
path
Path to FITS file.
feature_cols
List of feature columns. If none are specified, then all non-label,
non-ID columns will be used.
label_col
Name of label column.
hdu_index
Index of HDU in the FITS file. Default is 1, i.e., the first
extension in the FITS file.
encode_labels
Whether to encode labels as integers.
label_encoder
Encodes labels as integers. If not specified, the label column will
be read and a label encoding generated.
"""
self.path = path
self.feature_cols = feature_cols
self.label_col = label_col
self.hdu_index = hdu_index
self.encode_labels = encode_labels
self.label_encoder = label_encoder
if self.label_encoder and not self.encode_labels:
raise ValueError('label_encoder specified but encode_labels is '
'False')
if self.label_encoder is None:
self.label_encoder = sklearn.preprocessing.LabelEncoder()
# These will be set when the FITS file is opened.
self.n_instances = None
self.n_features = None
def to_proto(self) -> DatabasePB:
"""Serialises this database as a protobuf.
Returns
-------
DatabasePB
Protobuf representing this database.
"""
proto = DatabasePB()
proto.path = self.path
proto.class_name = 'FITSReader'
db_kwargs = {
'feature_cols': self.feature_cols,
'label_col': self.label_col,
'hdu_index': self.hdu_index,
'encode_labels': self.encode_labels}
for key, value in db_kwargs.items():
kwarg = proto.kwarg.add()
kwarg.key = key
kwarg.value = json.dumps(value)
proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder))
return proto
def __enter__(self):
self._hdulist = io_fits.open(self.path)
# If we haven't specified columns, use all except the label column.
cols = self._hdulist[self.hdu_index].columns.names
if not self.feature_cols:
self.feature_cols = [k for k in cols if k != self.label_col]
self.n_features = len(self.feature_cols)
self.n_instances = \
self._hdulist[self.hdu_index].data[self.label_col].ravel().shape[0]
return self
def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback):
self._hdulist.close()
delattr(self, '_hdulist')
def read_features(self, ids: Sequence[int]) -> numpy.ndarray:
"""Reads feature vectors from the database.
Parameters
----------
ids
Iterable of IDs.
Returns
-------
numpy.ndarray
N x D array of feature vectors.
"""
# TODO(MatthewJA): Optimise this.
# Allocate output features array.
features = numpy.zeros((len(ids), self.n_features))
for f_index, col in enumerate(self.feature_cols):
column_data = self._hdulist[self.hdu_index].data[col]
features[:, f_index] = column_data[ids]
return features
def read_labels(self,
labeller_ids: Sequence[int],
instance_ids: Sequence[int]) -> numpy.ndarray:
"""Reads label vectors from the database.
Parameters
----------
labeller_ids
Iterable of labeller IDs.
instance_ids
Iterable of instance IDs.
Returns
-------
numpy.ndarray
T x N x 1 array of label vectors.
"""
label_col = self._hdulist[self.hdu_index].data[self.label_col]
labels = label_col[instance_ids].reshape((1, -1, 1))
# Encode labels.
if self.encode_labels:
labels = numpy.apply_along_axis(
self.label_encoder.fit_transform,
axis=1,
arr=labels.reshape(labels.shape[:2])
).reshape(labels.shape)
return labels
def write_features(self, ids: Sequence[int], features: numpy.ndarray):
raise PermissionError('Cannot write to read-only database.')
def write_labels(self,
labeller_ids: Sequence[int],
instance_ids: Sequence[int],
labels: numpy.ndarray):
raise PermissionError('Cannot write to read-only database.')
def get_known_instance_ids(self) -> List[int]:
"""Returns a list of known instance IDs.
Returns
-------
List[int]
A list of known instance IDs.
"""
return list(range(self.n_instances))
def get_known_labeller_ids(self) -> List[int]:
"""Returns a list of known labeller IDs.
Returns
-------
List[int]
A list of known labeller IDs.
"""
raise NotImplementedError()
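# Unlike PandasReader, FITSReader defers opening the file to __enter__, so
# n_instances and n_features are only populated inside a `with` block. A
# minimal sketch (the path and label column name are hypothetical):
#
#     with FITSReader('catalogue.fits', feature_cols=[], label_col='CLASS') as r:
#         X = r.read_features(list(range(r.n_instances)))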
# For safe string-based access to database classes.
DATABASES = {
'ASCIIReader': ASCIIReader,
'GraphReader': GraphReader,
'HDF5Reader': HDF5Reader,
'FITSReader': FITSReader,
'ManagedHDF5Database': ManagedHDF5Database,
'GraphDatabase': GraphDatabase,
'PandasReader': PandasReader,
}
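# The mapping above lets a database be reconstructed from its serialised
# protobuf (see to_proto) without eval'ing class names. A minimal sketch,
# assuming `proto` is a DatabasePB produced by one of the readers:
#
#     db_class = DATABASES[proto.class_name]
#     kwargs = {kw.key: json.loads(kw.value) for kw in proto.kwarg}
#     with db_class(proto.path, **kwargs) as db:
#         ids = db.get_known_instance_ids()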
| bsd-3-clause |
rafaelmds/fatiando | gallery/datasets/hawaii_gravity.py | 6 | 2395 | """
Hawaii gravity data
-------------------
The :mod:`fatiando.datasets` package includes some data sets to make it easier
to try things out in Fatiando.
This example shows the gravity data from Hawaii.
"""
from __future__ import print_function
from fatiando.datasets import fetch_hawaii_gravity
import numpy as np
import matplotlib.pyplot as plt
# Load the gravity data from Hawaii
data = fetch_hawaii_gravity()
# The data are packaged in a dictionary. Look at the keys to see what is
# available.
print('Data keys:', data.keys())
# There are some metadata included
print('\nMetadata:\n')
print(data['metadata'])
# Let's plot all of it using the UTM x and y coordinates
shape = data['shape']
X, Y = data['x'].reshape(shape)/1000, data['y'].reshape(shape)/1000
fig = plt.figure(figsize=(14, 8))
plt.rcParams['font.size'] = 10
ax = plt.subplot(2, 3, 2)
ax.set_title('Raw gravity of Hawaii')
tmp = ax.contourf(Y, X, data['gravity'].reshape(shape), 60,
cmap='Reds')
fig.colorbar(tmp, ax=ax, pad=0, aspect=30).set_label('mGal')
ax = plt.subplot(2, 3, 3)
ax.set_title('Topography')
scale = np.abs([data['topography'].min(), data['topography'].max()]).max()
tmp = ax.contourf(Y, X, data['topography'].reshape(shape), 60,
cmap='terrain', vmin=-scale, vmax=scale)
fig.colorbar(tmp, ax=ax, pad=0, aspect=30).set_label('m')
ax = plt.subplot(2, 3, 4)
ax.set_title('Gravity disturbance')
scale = np.abs([data['disturbance'].min(), data['disturbance'].max()]).max()
tmp = ax.contourf(Y, X, data['disturbance'].reshape(shape), 60,
cmap='RdBu_r', vmin=-scale, vmax=scale)
fig.colorbar(tmp, ax=ax, pad=0, aspect=30).set_label('mGal')
# The disturbance without the effects of topography (calculated using the
# Bouguer plate)
ax = plt.subplot(2, 3, 5)
ax.set_title('Topography-free disturbance (Bouguer)')
tmp = ax.contourf(Y, X, data['topo-free-bouguer'].reshape(shape), 60,
cmap='viridis')
fig.colorbar(tmp, ax=ax, pad=0, aspect=30).set_label('mGal')
# The disturbance without the effects of topography (calculated using a
# tesseroid model of the topography)
ax = plt.subplot(2, 3, 6)
ax.set_title('Topography-free disturbance (full)')
tmp = ax.contourf(Y, X, data['topo-free'].reshape(shape), 60,
cmap='viridis')
fig.colorbar(tmp, ax=ax, pad=0, aspect=30).set_label('mGal')
plt.tight_layout()
plt.show()
| bsd-3-clause |
manazhao/tf_recsys | tensorflow/examples/learn/hdf5_classification.py | 75 | 2899 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, hdf5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Note that we are saving and loading the iris data in HDF5 format as a
# simple demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build a 3-layer DNN with 10, 20 and 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |