repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---
tommiseppanen/visualizations
|
tyre-model/old-plots/slip-simplified.py
|
1
|
2365
|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
def coefficient(longSlipValue, latSlipValue, extremumValue, asymptoteValue, extremumSlipLong, asymptoteSlipLong, extremumSlipLat, asymptoteSlipLat):
combinedSlip = np.sqrt(latSlipValue**2+longSlipValue**2)
if (combinedSlip == 0):
return 0
if (longSlipValue > 0):
k = latSlipValue / longSlipValue
limitExtremumX = (extremumSlipLong * extremumSlipLat) / np.sqrt(extremumSlipLat**2 + extremumSlipLong ** 2 * k ** 2)
limitExtremumY = k * limitExtremumX
limitExtremumTotal = np.sqrt(limitExtremumX**2+limitExtremumY**2)
limitAsymptoteX = (asymptoteSlipLong * asymptoteSlipLat) / np.sqrt(asymptoteSlipLat**2 + asymptoteSlipLong ** 2 * k ** 2)
limitAsymptoteY = k * limitAsymptoteX
limitAsymptoteTotal = np.sqrt(limitAsymptoteX**2+limitAsymptoteY**2)
if (combinedSlip <= limitExtremumTotal):
return (combinedSlip / limitExtremumTotal) * extremumValue
elif (combinedSlip > limitExtremumTotal and combinedSlip < limitAsymptoteTotal):
return (( asymptoteValue - extremumValue) / (limitAsymptoteTotal - limitExtremumTotal)) \
* (combinedSlip - limitExtremumTotal) + extremumValue
return asymptoteValue
if (latSlipValue <= extremumSlipLat):
return (latSlipValue / extremumSlipLat) * extremumValue
elif (latSlipValue > extremumSlipLat and latSlipValue < asymptoteSlipLat):
return (( asymptoteValue - extremumValue) / (asymptoteSlipLat - extremumSlipLat)) \
* (latSlipValue - extremumSlipLat) + extremumValue
return asymptoteValue
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
X = np.arange(0.0, 0.6, 0.01)
Y = np.arange(0.0, 60, 1)
xs = np.zeros(len(X)*len(Y))
ys = np.zeros(len(X)*len(Y))
zs = np.zeros(len(X)*len(Y))
c = ["" for x in range(len(X)*len(Y))]
Z = np.zeros((len(X),len(Y)))
for x in range(len(X)):
for y in range(len(Y)):
xs[x*len(Y)+y] = X[x]
ys[x*len(Y)+y] = Y[y]
value = coefficient(X[x], Y[y], 1.0, 0.75, 0.2, 0.4, 20, 40)
zs[x*len(Y)+y] = value
c[x*len(Y)+y] = 'b' if value <= 0.75 else 'r'
ax.scatter(xs, ys, zs, s = 1, c = c)
plt.show()
|
mit
|
michigraber/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
142
|
5990
|
# Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when a feature is zero across all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
bsd-3-clause
|
droundy/deft
|
papers/thesis-scheirer/RG_plots.py
|
1
|
4058
|
import scipy as sp
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
import pylab as plt
import matplotlib
import RG
import SW
import numpy as np
import time
import integrate
import os
import sys
###############################################################################################
# Author: Ryan Scheirer #
# Email: [email protected] #
# Date: February 2016 #
#
# Uses fsolve to find the common tangent of the free energy density vs number density... #
# ...this then constructs the temp vs filling fraction liquid-vapor coexistence plot, total...#
# ...grand free energy per volume, and many more fun plots. #
###############################################################################################
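# -------------------------------------------------------------------------- #
# Illustrative sketch (not part of the original script): one way a common     #
# tangent of a free-energy density f(n) could be located with fsolve.  The    #
# callables f and dfdn below are placeholders for the interpolated free       #
# energy and its derivative with respect to number density.                   #
def common_tangent_sketch(f, dfdn, n_a_guess, n_b_guess):
    """Return densities (n_a, n_b) whose tangent lines coincide."""
    def equations(p):
        n_a, n_b = p
        slope = (f(n_b) - f(n_a)) / (n_b - n_a)
        # Tangency requires f'(n_a) = f'(n_b) = slope of the chord joining them.
        return [dfdn(n_a) - slope, dfdn(n_b) - slope]
    return fsolve(equations, [n_a_guess, n_b_guess])
# -------------------------------------------------------------------------- #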
################################## START INITIALIZATION #######################################
# #
# #
### Normal temperature linspace (useful for quick troubleshooting) ###
#temp = plt.linspace(0.4,1.3,40)
numdensity = plt.linspace(0.0001,.2,1000)
numdensity2 = plt.linspace(0.0001,.2,1000)
temp = plt.linspace(0.6,1.28,20)
# #
# #
############################### END INITIALIZATION ############################################
########################### START PLOTTING STUFF ##############################################
# #
# #
fns = []
fnames = []
def fns_load():
global fnames
dirname = os.getcwd()
files = os.listdir(dirname+'/data')
for arg in files:
if 'f5.out' in arg:
fnames.append(dirname+'/data/'+arg)
for arg in fnames:
f05data = np.loadtxt(arg)
f05 = [f05data[i][1] for i in range(0,len(f05data))]
numdensity = [f05data[i][0] for i in range(0,len(f05data))]
f05interp = interp1d(numdensity,f05,kind='cubic')
fns.append(f05interp)
def fns_tot(n,i):
return fns_ext(n,i) + RG.a1SW(n)*n
def fns_ext(numdensity,i):
if numdensity > 0.0001 and numdensity < 0.2:
return fns[i](numdensity)
return RG.fiterative(temp[i],numdensity,0)
def plotstuff():
numdensity3 = plt.linspace(0.0001,.2,1000)
fns_load()
for i in range(0,len(fns)):
y = []
for j in range(0,len(numdensity3)):
y.append(float(fns_tot(float(numdensity3[j]),i)))
plt.figure()
plt.plot(numdensity3,y)
plt.title(fnames[i])
savename = os.getcwd()
#savename += '/figs/'
savename += fnames[i].split('data/')[1].split('.out')[0]
savename += '.pdf'
#print savename
plt.savefig(savename)
#plt.show()
plotstuff()
# #
# #
######################################## END PLOTTING STUFF ###################################
|
gpl-2.0
|
gully/PyKE
|
pyke/kepffi.py
|
2
|
24526
|
from .utils import PyKEArgumentHelpFormatter
import sys
import os
import urllib.request
import re
import math
import numpy as np
from astropy.io import fits as pyfits
from matplotlib import pyplot as plt
from . import kepio, kepmsg, kepkey
__all__ = ['kepffi']
# global variables
ffifile = False; aperfile = False; maskfile = 'KeplerFFI.txt'
plotfile = 'KeplerFFI.png'; pimg = None; mask = []; zscale = False
xmin = 0.0; xmax = 1000.0; ymin = 0.0; ymax = 1000.0; zmin = False; zmax = False
kepid = ''; ra = ''; dec = ''; kepmag = ''; season = ''; quarter = -1
skygroup = ''; channel = ''; module = ''; output = ''; column = ''; row = ''
colmap='jet'; aid = None; cid = None; did = None; eid = None; fid = None
pkepmag = None; pkepid = None; pra = None; pdec = None
# -----------------------------------------------------------
# core code
def kepffi(ffifile, kepid, ra, dec, aperfile, imin, imax, iscale, cmap, npix,
verbose=False, logfile='kepffi.log'):
"""
kepffi -- Display a portion of a Full Frame Image (FFI) and define custom
target apertures
Parameters
----------
ffifile : str
The name of a MAST standard format Full Frame Image (FFI) FITS file
containing a Kepler channel image within each data extension.
kepid : str
The numerical Kepler identification number for a specific source,
obtained from the MAST Target Search page.
ra : str
The J2000 Right Ascension of a target in decimal degrees or sexagesimal
hours (hh:mm:ss.ss). In conjunction with dec, this parameter overrides
the content of kepid.
dec : str
The J2000 Declination of a target in decimal degrees or sexagesimal
degrees (dd:mm:ss.s). In conjunction with ra, this parameter argument
overrides the content of kepid.
aperfile : str
The (directory path and) name of an existing custom aperture definition
file. If provided, this aperture will be plotted over the displayed
image.
imin : float
Sets the minimum intensity range for the image display. The user can
select the minimum level (in electrons per cadence) with this
parameter. The default minimum intensity level is the median of the
faintest 10% of pixels in the image.
imax : float
Sets the maximum intensity range for the image display. The user can
select the maximum level (in electrons per cadence) with this
parameter. The default maximum intensity level is the median of the
brightest 10% of pixels in the image.
iscale : str
The type of intensity scaling for the image display.
Options:
* linear
* logarithmic
* squareroot
cmap : str
Color intensity scheme for the image display.
npix : int
The pixel size of the square subimage extracted from the FFI for
display.
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
Name of the logfile containing error and warning messages.
"""
global pimg, zscale, zmin, zmax, xmin, xmax, ymin, ymax, quarter
global kepmag, skygroup, season, channel
global module, output, row, column, maskfile, plotfile
global pkepid, pkepmag, pra, pdec, colmap, mask
# input arguments
maskfile = 'kepffi-' + str(kepid) + '.txt'
plotfile = 'kepffi-' + str(kepid) + '.png'
zmin = imin; zmax = imax; zscale = iscale; colmap = cmap
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPFFI -- '
+ ' ffifile={}'.format(ffifile)
+ ' kepid={}'.format(str(kepid))
+ ' ra={}'.format(ra)
+ ' dec={}'.format(dec)
+ ' aperfile={}'.format(aperfile)
+ ' imin={}'.format(imin)
+ ' imax={}'.format(imax)
+ ' iscale={}'.format(iscale)
+ ' cmap={}'.format(cmap)
+ ' npix={}'.format(npix)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPFFI started at', logfile, verbose)
# open existing mask file
if kepio.fileexists(aperfile):
lines = kepio.openascii(aperfile, 'r', logfile, verbose)
for line in lines:
line = line.strip().split('|')
y0 = int(line[3])
x0 = int(line[4])
pixels = line[5].split(';')
for pixel in pixels:
m = y0 + int(pixel.split(',')[0])
n = x0 + int(pixel.split(',')[1])
mask.append(str(m)+', '+str(n))
kepio.closeascii(lines, logfile, verbose)
# RA and Dec conversion
if kepid == 'None' or kepid == 'none' or kepid.strip() == '':
try:
mra = float(ra)
mdec = float(dec)
except:
try:
mra, mdec = sex2dec(ra, dec)
except:
txt = 'ERROR -- no sensible RA and Dec coordinates provided'
sys.exit(txt)
# open FFI FITS file
ffi = pyfits.open(ffifile, 'readonly')
try:
quarter = ffi[0].header['QUARTER']
except:
try:
dateobs = ffi[0].header['DATE-OBS']
if dateobs == '2009-04-24': quarter = 0
if dateobs == '2009-04-25': quarter = 0
if dateobs == '2009-04-26': quarter = 0
if dateobs == '2009-06-19': quarter = 2
if dateobs == '2009-08-19': quarter = 2
if dateobs == '2009-09-17': quarter = 2
if dateobs == '2009-10-19': quarter = 3
if dateobs == '2009-11-18': quarter = 3
if dateobs == '2009-12-17': quarter = 3
except:
sys.exit('ERROR -- cannot determine quarter when FFI was taken'
' . Either a\n QUARTER or DATE-OBS keyword is expected in'
' the primary header.')
if quarter == 0:
quarter = 1
if quarter < 0:
sys.exit('ERROR -- cannot determine quarter from FFI.')
if int(quarter) == 0:
season = 3
else:
season = (int(quarter) - 2) % 4
# locate target in MAST
try:
kepid, ra, dec, kepmag, skygroup, channel, module, output, row, \
column = MASTKepID(kepid, season)
pkepmag = kepmag; pkepid = kepid
except:
kepid, ra, dec, kepmag, skygroup, channel, module, output, row, \
column = MASTRADec(mra, mdec, 8.0, season)
ra,dec = dec2sex(ra, dec)
pra = ra; pdec = dec
print(kepid, ra, dec, kepmag, skygroup, channel, module, output, row,
column)
# read and close FFI FITS file
img = readimage(ffi, int(channel))
ffi.close()
# print target data
print(''
+ ' KepID: {}'.format(kepid)
+ ' RA (J2000): {}'.format(ra)
+ ' Dec (J2000): {}'.format(dec)
+ ' KepMag: {}'.format(kepmag)
+ ' SkyGroup: {:>2}'.format(skygroup)
+ ' Season: {:>2}'.format(season)
+ ' Channel: {:>2}'.format(channel)
+ ' Module: {:>2}'.format(module)
+ ' Output: {:>1}'.format(output)
+ ' Column: {:>4}'.format(column)
+ ' Row: {:>4}'.format(row)
+ '')
# subimage of channel for plot
ymin = int(max([int(row) -npix /2, 0]))
ymax = int(min([int(row) +npix /2 + 1, img.shape[0]]))
xmin = int(max([int(column) - npix / 2, 0]))
xmax = int(min([int(column) + npix / 2 + 1, img.shape[1]]))
# intensity scale
nstat = 2; pixels = []
for i in range(ymin, ymax + 1):
for j in range(xmin, xmax + 1):
pixels.append(img[i, j])
pixels = np.array(np.sort(pixels), dtype=np.float32)
if int(float(len(pixels)) / 10 + 0.5) > nstat:
nstat = int(float(len(pixels)) / 10 + 0.5)
if not zmin:
zmin = np.median(pixels[:nstat])
if not zmax:
zmax = np.median(pixels[-nstat:])
if 'log' in zscale:
img = np.log10(img)
zmin = math.log10(zmin)
zmax = math.log10(zmax)
if 'sq' in zscale:
img = np.sqrt(img)
zmin = math.sqrt(zmin)
zmax = math.sqrt(zmax)
pimg = img[ymin:ymax, xmin:xmax]
# plot limits
ymin = float(ymin) - 0.5
ymax = float(ymax) - 0.5
xmin = float(xmin) - 0.5
xmax = float(xmax) - 0.5
# plot style
plt.figure(figsize=[10, 7])
plotimage()
plt.show()
# -----------------------------------------------------------
# plot channel image
def plotimage():
global aid, cid, did, eid, fid
# print FFI and source location data on plot
plt.clf()
plt.axes([0.73, 0.09, 0.25, 0.4])
plt.text(0.1, 1.0, ' KepID: {}'.format(pkepid), fontsize=12)
plt.text(0.1, 0.9, ' RA (J2000): {}'.format(pra), fontsize=12)
plt.text(0.1, 0.8, 'Dec (J2000): {}'.format(pdec), fontsize=12)
plt.text(0.1, 0.7, ' KepMag: {}'.format(pkepmag), fontsize=12)
plt.text(0.1, 0.6, ' SkyGroup: {}'.format(skygroup), fontsize=12)
plt.text(0.1, 0.5, ' Season: {}'.format(season), fontsize=12)
plt.text(0.1, 0.4, ' Channel: {}'.format(channel), fontsize=12)
plt.text(0.1, 0.3, ' Module: {}'.format(module), fontsize=12)
plt.text(0.1, 0.2, ' Output: {}'.format(output), fontsize=12)
plt.text(0.1, 0.1, ' Column: {}'.format(column), fontsize=12)
plt.text(0.1, 0.0, ' Row: {}'.format(row), fontsize=12)
plt.setp(plt.gca(), xticklabels=[], xticks=[], yticklabels=[], yticks=[])
plt.xlim(0.0, 1.0)
plt.ylim(-0.05, 1.12)
# clear button
plt.axes([0.73, 0.87, 0.25, 0.09])
plt.text(0.5, 0.5, 'CLEAR', fontsize=24, weight='heavy',
horizontalalignment='center', verticalalignment='center')
plt.setp(plt.gca(), xticklabels=[], xticks=[], yticklabels=[], yticks=[])
plt.fill([0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0], '#ffffee')
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
aid = plt.connect('button_press_event', clicker1)
# dump custom aperture to file button
plt.axes([0.73, 0.77, 0.25, 0.09])
plt.text(0.5, 0.5, 'DUMP', fontsize=24, weight='heavy',
horizontalalignment='center', verticalalignment='center')
plt.setp(plt.gca(), xticklabels=[], xticks=[], yticklabels=[], yticks=[])
plt.fill([0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0], '#ffffee')
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
cid = plt.connect('button_press_event', clicker3)
# print window to png file button
plt.axes([0.73, 0.67, 0.25, 0.09])
plt.text(0.5, 0.5, 'PRINT', fontsize=24, weight='heavy',
horizontalalignment='center', verticalalignment='center')
plt.setp(plt.gca(), xticklabels=[], xticks=[], yticklabels=[], yticks=[])
plt.fill([0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0], '#ffffee')
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
did = plt.connect('button_press_event', clicker4)
# print close plot file button
plt.axes([0.73, 0.57, 0.25, 0.09])
plt.text(0.5, 0.5, 'CLOSE', fontsize=24, weight='heavy',
horizontalalignment='center', verticalalignment='center')
plt.setp(plt.gca(), xticklabels=[], xticks=[], yticklabels=[], yticks=[])
plt.fill([0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0], '#ffffee')
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
eid = plt.connect('button_press_event', clicker5)
# plot the image window
ax = plt.axes([0.08, 0.09, 0.63, 0.88])
plt.subplots_adjust(0.06, 0.1, 0.93, 0.88)
plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
labels = ax.get_yticklabels()
plt.setp(labels, 'rotation', 90)
plt.imshow(pimg, aspect='auto', interpolation='nearest', origin='lower',
vmin=zmin, vmax=zmax, extent=(xmin, xmax, ymin, ymax), cmap=colmap)
plt.gca().set_autoscale_on(False)
plt.xlabel('Pixel Column Number', {'color' : 'k'})
plt.ylabel('Pixel Row Number', {'color' : 'k'})
plt.grid()
# plot the mask
if colmap in ['Greys', 'binary', 'bone', 'gist_gray', 'gist_yarg', 'gray',
'pink', 'RdGy']:
sqcol = 'g'
alpha = 0.5
else:
sqcol = '#ffffee'
alpha = 0.8
for pixel in mask:
m = int(pixel.split(',')[0])
n = int(pixel.split(',')[1])
y = [m-0.5, m+0.5, m+0.5, m-0.5, m-0.5]
x = [n-0.5, n-0.5, n+0.5, n+0.5, n-0.5]
plt.fill(x, y, sqcol, alpha=alpha, ec=sqcol)
# print x,y
fid = plt.connect('key_press_event', clicker6)
# render plot
plt.show()
# -----------------------------------------------------------
# target data retrieval from MAST based upon KepID
def MASTKepID(id,season):
global skygroup, column, row
# build mast query
url = 'http://archive.stsci.edu/kepler/kepler_fov/search.php?'
url += 'action=Search'
url += '&kic_kepler_id=' + id
url += '&max_records=100'
url += '&verb=3'
url += '&outputformat=CSV'
# retrieve results from MAST
lines = urllib.request.urlopen(url).read().decode('utf-8').splitlines()
for line in lines:
line = line.strip()
out = line.split(',')
if len(out) > 0:
kepid = out[3]
ra = out[0]
dec = out[1]
kepmag = out[43]
skygroup = out[74]
channel = out[95 + season * 5]
module = out[96 + season * 5]
output = out[97 + season * 5]
row = out[98 + season * 5]
column = out[99 + season * 5]
else:
txt = 'ERROR -- no target found with KepID {}'.format(id)
sys.exit(txt)
return (kepid, ra, dec, kepmag, skygroup, channel, module, output, row,
column)
# -------------------------------------
# detector location retrieval based upon RA and Dec
def MASTRADec(ra, dec, darcsec, season):
global skygroup, column, row
# WCS data
cd1_1 = 0.000702794927969
cd1_2 = -0.000853190160515
cd2_1 = -0.000853190160515
cd2_2 = -0.000702794927969
cd = np.array([[cd1_1, cd1_2], [cd2_1, cd2_2]])
cd = np.linalg.inv(cd)
# coordinate limits
x1 = 1.0e30
x2 = x1
darcsec /= 3600.0
ra1 = ra - darcsec / 15.0 / math.cos(dec * math.pi / 180)
ra2 = ra + darcsec / 15.0 / math.cos(dec * math.pi / 180)
dec1 = dec - darcsec
dec2 = dec + darcsec
# build mast query
url = 'http://archive.stsci.edu/kepler/kepler_fov/search.php?'
url += 'action=Search'
url += '&kic_degree_ra=' + str(ra1) + '..' + str(ra2)
url += '&kic_dec=' + str(dec1) + '..' + str(dec2)
url += '&max_records=100'
url += '&verb=3'
url += '&outputformat=CSV'
# retrieve results from MAST: nearest KIC source to supplied coordinates
z = ''
x = 1.0e30
lines = urllib.request.urlopen(url).read().decode('utf-8').splitlines()
for line in lines:
line = line.strip()
if (len(line) > 0 and
'Kepler' not in line and
'integer' not in line and
'no rows found' not in line):
out = line.split(',')
r = (float(out[6].split(' ')[0]) +
float(out[6].split(' ')[1]) / 60.0 +
float(out[6].split(' ')[2]) / 3600.0) * 15.0
d = (float(out[7].split(' ')[0]) +
float(out[7].split(' ')[1]) / 60.0 +
float(out[7].split(' ')[2]) / 3600.0)
a = math.sqrt((abs(r - ra) / 15.0 / math.cos(d * math.pi / 180))**2 + abs(d - dec)**2)
if a < x:
x = a
z = line.split(',')
if len(z) > 0:
kepid = None
kepmag = None
skygroup = out[73]
channel = out[94 + season * 5]
module = out[95 + season * 5]
output = out[96 + season * 5]
else:
txt = ('ERROR -- row and column could not be calculated. Is location'
' on silicon?')
sys.exit(txt)
# convert coordinates to decimal for the two targets, determine distance from input
zra, zdec = sex2dec(z[6], z[7])
dra = zra - ra
ddec = zdec - dec
drow = cd[0, 0] * dra + cd[0, 1] * ddec
dcol = cd[1, 0] * dra + cd[1, 1] * ddec
# pixel coordinates of the nearest KIC target
row = z[97 + season * 5]
column = z[98 + season * 5]
# pixel coordinate of target
row = str(int(float(row) + drow + 0.5))
column = str(int(float(column) + dcol + 0.5))
return (kepid, ra, dec, kepmag, skygroup, channel, module, output, row,
column)
# -----------------------------------
# convert sexagesimal hours to decimal degrees
def sex2dec(ra, dec):
ra = re.sub('\s+', '|', ra.strip())
ra = re.sub(':', '|', ra.strip())
ra = re.sub(';', '|', ra.strip())
ra = re.sub(',', '|', ra.strip())
ra = re.sub('-', '|', ra.strip())
ra = ra.split('|')
outra = (float(ra[0]) + float(ra[1]) / 60 + float(ra[2]) / 3600) * 15.0
dec = re.sub('\s+', '|', dec.strip())
dec = re.sub(':', '|', dec.strip())
dec = re.sub(';', '|', dec.strip())
dec = re.sub(',', '|', dec.strip())
dec = re.sub('-', '|', dec.strip())
dec = dec.split('|')
if float(dec[0]) > 0.0:
outdec = float(dec[0]) + float(dec[1]) / 60 + float(dec[2]) / 3600
else:
outdec = float(dec[0]) - float(dec[1]) / 60 - float(dec[2]) / 3600
return outra, outdec
# -----------------------------------
# convert decimal RA and Dec to sexagesimal
def dec2sex(ra, dec):
if ra < 0.0 or ra > 360.0 or dec < -90.0 or dec > 90.0:
sys.exit('ERROR -- badly defined RA and Dec provided')
tmp = ra / 15
ra_h = str(int(tmp))
tmp = (tmp - float(ra_h)) * 60.0
ra_m = str(int(tmp))
tmp = (tmp - float(ra_m)) * 6000.0
print(tmp, float(int(tmp + 0.5)))
ra_s = '{}'.format(float(int(tmp + 0.5)) / 100)
if dec < 0.0:
tmp = -dec
else:
tmp = dec
dec_h = str(int(tmp))
tmp = (tmp - float(dec_h)) * 60.0
dec_m = str(int(tmp))
tmp = (tmp - float(dec_m)) * 600.0
dec_s = '%.1f' % (int(tmp + 0.5) / 10)
if dec < 0.0:
dec_h = '-' + dec_h
outra = ra_h + ':' + ra_m + ':' + ra_s
outdec = dec_h + ':' + dec_m + ':' + dec_s
return outra, outdec
# -----------------------------------------------------------
# read image from HDU structure
def readimage(struct, hdu):
try:
imagedata = struct[hdu].data
except:
sys.exit('ERROR -- cannot read image data from HDU {}'.format(hdu))
return imagedata
# -----------------------------------------------------------
# clear all pixels from pixel mask
def clicker1(event):
global mask, aid, cid, did, eid, fid
if event.inaxes:
if event.button == 1:
if (event.x > 585 and event.x < 783 and
event.y > 488 and event.y < 537):
plt.disconnect(aid)
plt.disconnect(cid)
plt.disconnect(did)
plt.disconnect(eid)
plt.disconnect(fid)
mask = []
plt.clf()
plotimage()
# -----------------------------------------------------------
# dump custom aperture definition file
def clicker3(event):
global aid, cid, did, eid, fid
if event.inaxes:
if event.button == 1:
if (event.x > 585 and event.x < 783 and
event.y > 432 and event.y < 480):
masktxt = 'NEW|'
masktxt += skygroup + '|'
masktxt += '{' + re.sub('\s+',':',str(ra))
masktxt += ',' + re.sub('\s+',':',str(dec))
masktxt += '},TAD_NO_HALO,TAD_NO_UNDERSHOOT_COLUMN|'
masktxt += row + '|'
masktxt += column + '|'
for coord in sorted(set(mask)):
masktxt += str(int(coord.split(',')[0]) - int(row)) + ','
masktxt += str(int(coord.split(',')[1]) - int(column)) + ';'
if os.path.isfile(maskfile):
os.remove(maskfile)
out = open(maskfile,'a')
out.write(masktxt[:-1]+'\n')
out.close()
print('Wrote custom aperture definition file ' + maskfile)
return
# -----------------------------------------------------------
# print plot to png with left-mouse click
def clicker4(event):
if event.inaxes:
if event.button == 1:
if (event.x > 585 and event.x < 783 and
event.y > 377 and event.y < 425):
plt.savefig(plotfile)
print('Wrote plot hardcopy file {}'.format(plotfile))
# -----------------------------------------------------------
# close plot and exit program
def clicker5(event):
global mask, aid, cid, did, eid, fid, done
if event.inaxes:
if event.button == 1:
if (event.x > 585 and event.x < 783 and
event.y > 320 and event.y < 368):
plt.disconnect(aid)
plt.disconnect(cid)
plt.disconnect(did)
plt.disconnect(eid)
plt.disconnect(fid)
# -----------------------------------------------------------
# this function will be called with every click of the mouse
def clicker6(event):
global mask, aid, cid, did, eid, fid
if event.inaxes:
if event.key == 'x':
if colmap in ['Greys','binary','bone','gist_gray','gist_yarg',
'gray','pink','RdGy']:
sqcol = 'g'
alpha = 0.5
else:
sqcol = '#ffffee'
alpha = 0.8
m = float(int(event.xdata + 0.5))
n = float(int(event.ydata + 0.5))
txt = str(int(n))+','+str(int(m))
if txt in mask:
tmpmask = []
for pixel in mask:
if pixel != txt:
tmpmask.append(pixel)
mask = tmpmask
else:
mask.append(txt)
plotimage()
def kepffi_main():
import argparse
parser = argparse.ArgumentParser(
description=('Plot sub-areas of Kepler Full Frame Images and'
' define custom target apertures'),
formatter_class=PyKEArgumentHelpFormatter)
parser.add_argument('ffifile', help='name of input FFI FITS file',
type=str)
parser.add_argument('--kepid', default='',
help='Kepler ID of target from Kepler Input Catalog',
type=str)
parser.add_argument('--ra', default='',
help='Right Ascension of target J2000 [hours or deg]',
type=str)
parser.add_argument('--dec', default='', help='Declination of target J2000 [deg]',
type=str)
parser.add_argument('--aperfile', default='',
help='name of ASCII custom aperture definition file',
type=str)
parser.add_argument('--imin', default=1.5E5,
help='minimum of image intensity scale [e-]',
type=float)
parser.add_argument('--imax', default=5.0E6,
help='maximum of image intensity scale [e-]',
type=float)
parser.add_argument('--iscale', default='logarithmic',
help='type of image intensity scale', type=str,
choices=['linear','logarithmic','squareroot'])
parser.add_argument('--cmap', default='PuBu', help='image colormap',
type=str)
parser.add_argument('--npix', default=30,
help='pixel dimension of subimage', type=int)
parser.add_argument('--verbose', action='store_true',
help='Write to a log file?')
parser.add_argument('--logfile', '-l', help='Name of ascii log file',
default='kepffi.log', dest='logfile', type=str)
args = parser.parse_args()
kepffi(args.ffifile, args.kepid, args.ra, args.dec, args.aperfile,
args.imin, args.imax, args.iscale, args.cmap, args.npix,
args.verbose, args.logfile)
|
mit
|
queirozfcom/titanic
|
src/models/myfirstforest.py
|
3
|
3926
|
""" Writing my first randomforest code.
Author : AstroDave
Date : 23rd September, 2012
please see packages.python.org/milk/randomforests.html for more
"""
import numpy as np
import csv as csv
from sklearn.ensemble import RandomForestClassifier
csv_file_object = csv.reader(open('train.csv', 'rb')) #Load in the training csv file
header = csv_file_object.next() #Skip the first line as it is a header
train_data=[] #Create a variable called 'train_data'
for row in csv_file_object: #Skip through each row in the csv file
train_data.append(row[1:]) #adding each row to the data variable
train_data = np.array(train_data) #Then convert from a list to an array
#I need to convert all strings to integer classifiers:
#Male = 1, female = 0:
train_data[train_data[0::,3]=='male',3] = 1
train_data[train_data[0::,3]=='female',3] = 0
#embark c=0, s=1, q=2
train_data[train_data[0::,10] =='C',10] = 0
train_data[train_data[0::,10] =='S',10] = 1
train_data[train_data[0::,10] =='Q',10] = 2
#I need to fill in the gaps of the data and make it complete.
#So where there is no price, I will assume price on median of that class
#Where there is no age I will give median of all ages
#All the ages with no data make the median of the data
train_data[train_data[0::,4] == '',4] = np.median(train_data[train_data[0::,4]\
!= '',4].astype(np.float))
#All missing embarkation values: make them embark from the most common place
train_data[train_data[0::,10] == '',10] = np.round(np.mean(train_data[train_data[0::,10]\
!= '',10].astype(np.float)))
train_data = np.delete(train_data,[2,7,9],1) #remove the name data, cabin and ticket
#I need to do the same with the test data now so that the columns are in the same
#as the training data
test_file_object = csv.reader(open('test.csv', 'rb')) #Load in the test csv file
header = test_file_object.next() #Skip the first line as it is a header
test_data=[] #Create a variable called 'test_data'
ids = []
for row in test_file_object: #Skip through each row in the csv file
ids.append(row[0])
test_data.append(row[1:]) #adding each row to the data variable
test_data = np.array(test_data) #Then convert from a list to an array
#I need to convert all strings to integer classifiers:
#Male = 1, female = 0:
test_data[test_data[0::,2]=='male',2] = 1
test_data[test_data[0::,2]=='female',2] = 0
#embark c=0, s=1, q=2
test_data[test_data[0::,9] =='C',9] = 0 #Note this ordinal encoding is not ideal: category 2 is not "twice" category 1, nor is 3 three times category 1
test_data[test_data[0::,9] =='S',9] = 1
test_data[test_data[0::,9] =='Q',9] = 2
#All the ages with no data make the median of the data
test_data[test_data[0::,3] == '',3] = np.median(test_data[test_data[0::,3]\
!= '',3].astype(np.float))
#All missing embarkation values: make them embark from the most common place
test_data[test_data[0::,9] == '',9] = np.round(np.mean(test_data[test_data[0::,9]\
!= '',9].astype(np.float)))
#All the missing prices: assume the median of their respective class
for i in xrange(np.size(test_data[0::,0])):
if test_data[i,7] == '':
test_data[i,7] = np.median(test_data[(test_data[0::,7] != '') &\
(test_data[0::,0] == test_data[i,0])\
,7].astype(np.float))
test_data = np.delete(test_data,[1,6,8],1) #remove the name data, cabin and ticket
#The data is now ready to go. So let's train then test!
print 'Training '
forest = RandomForestClassifier(n_estimators=100)
forest = forest.fit(train_data[0::,1::],\
train_data[0::,0])
print 'Predicting'
output = forest.predict(test_data)
open_file_object = csv.writer(open("myfirstforest.csv", "wb"))
open_file_object.writerow(["PassengerId","Survived"])
open_file_object.writerows(zip(ids, output))
|
mit
|
sogis/Quantum-GIS
|
python/plugins/processing/algs/qgis/MeanAndStdDevPlot.py
|
19
|
3553
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
MeanAndStdDevPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class MeanAndStdDevPlot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
MEAN_FIELD = 'MEAN_FIELD'
STDDEV_FIELD = 'STDDEV_FIELD'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Mean and standard deviation plot')
self.group, self.i18n_group = self.trAlgorithm('Graphics')
self.addParameter(ParameterTable(self.INPUT,
self.tr('Input table')))
self.addParameter(ParameterTableField(self.NAME_FIELD,
self.tr('Category name field'), self.INPUT,
ParameterTableField.DATA_TYPE_ANY))
self.addParameter(ParameterTableField(self.MEAN_FIELD,
self.tr('Mean field'), self.INPUT))
self.addParameter(ParameterTableField(self.STDDEV_FIELD,
self.tr('StdDev field'), self.INPUT))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Plot')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
namefieldname = self.getParameterValue(self.NAME_FIELD)
meanfieldname = self.getParameterValue(self.MEAN_FIELD)
stddevfieldname = self.getParameterValue(self.STDDEV_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, namefieldname, meanfieldname, stddevfieldname)
plt.close()
ind = np.arange(len(values[namefieldname]))
width = 0.8
plt.bar(ind, values[meanfieldname], width, color='r',
yerr=values[stddevfieldname],
error_kw=dict(ecolor='yellow'),
)
plt.xticks(ind, values[namefieldname], rotation=45)
plotFilename = output + '.png'
lab.savefig(plotFilename)
f = open(output, 'w')
f.write('<html><img src="' + plotFilename + '"/></html>')
f.close()
|
gpl-2.0
|
sorend/fylearn
|
fylearn/fpt.py
|
1
|
11132
|
# -*- coding: utf-8 -*-
"""Fuzzy pattern tree based methods
The module structure is the following:
- The "FuzzyPatternTreeClassifier" implements the fit logic for bottom-up
construction of the fuzzy pattern tree [1].
- The "FuzzyPatternTreeTopDownClassifier" implements the fit logic for top-down
construction of the fuzzy pattern tree [2].
- The "FuzzyPatternTreeRegressor" implements a regressor based on
top-down constructed fuzzy pattern tree [3].
References:
[1] Z. Huang, T. D. Gedeon, and M. Nikravesh, "Pattern trees induction: A new machine
learning method," IEEE Trans. Fuzzy Syst., vol. 16, no. 4, pp. 958–970, Aug. 2008.
[2] R. Senge, and E. Hüllermeier, "Top-down induction of fuzzy pattern trees," IEEE
Trans. Fuzzy Syst., vol. 19, no. 2, pp. 241-252, Apr. 2011.
[3] R. Senge, and E. Hüllermeier, "Pattern trees for regression and fuzzy systems
modeling," in Proc. IEEE Int. Conf. on Fuzzy Syst., 2010.
"""
import numpy as np
import heapq
from sklearn.metrics import mean_squared_error
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_array
import fylearn.fuzzylogic as fl
# aggregation operators to use
OPERATORS = (
fl.min,
fl.einstein_i,
fl.lukasiewicz_i,
fl.prod,
fl.owa([0.2, 0.8]),
fl.owa([0.4, 0.6]),
fl.mean,
fl.owa([0.6, 0.4]),
fl.owa([0.8, 0.2]),
fl.algebraic_sum,
fl.lukasiewicz_u,
fl.einstein_u,
fl.max
)
def _tree_iterator(root):
Q = [ root ]
while Q:
tree = Q.pop(0)
if isinstance(tree, Inner):
Q.extend(tree.branches_)
yield tree
def _tree_leaves(root):
return [ x for x in _tree_iterator(root) if isinstance(x, Leaf) ]
def _tree_clone_replace_leaf(root, replace_node, new_node):
if root == replace_node:
return new_node
else:
if isinstance(root, Leaf):
return root
else:
new_branches = [ _tree_clone_replace_leaf(b, replace_node, new_node) for b in root.branches_ ]
return Inner(root.aggregation_, new_branches)
def _tree_contains(root, to_find):
for n in _tree_iterator(root):
if n == to_find:
return True
return False
def default_rmse(a, b):
return 1.0 - mean_squared_error(a, b)
def default_fuzzifier(idx, F):
"""Default fuzzifier function.
Creates three fuzzy sets with triangular membership functions: (low, med, hig) from min and max data points.
"""
# get min/max from data
v_min = np.nanmin(F)
v_max = np.nanmax(F)
# blarg
return [ Leaf(idx, "low", fl.TriangularSet(v_min - (v_max - v_min) ** 2, v_min, v_max)),
Leaf(idx, "med", fl.TriangularSet(v_min, v_min + ((v_max - v_min) / 2), v_max)),
Leaf(idx, "hig", fl.TriangularSet(v_min, v_max, v_max + (v_max - v_min) ** 2)) ]
def _select_candidates(candidates, n_select, class_vector, similarity_measure, X):
"""Select a number of candidate trees with the best similarity to the class vector."""
R = [ _evaluate_similarity(c, class_vector, similarity_measure, X) for c in candidates ]
return heapq.nlargest(n_select, R, key=lambda x: x[0])
def _evaluate_similarity(candidate, class_vector, similarity_measure, X):
y_pred = candidate(X)
s = similarity_measure(y_pred, class_vector)
return (s, candidate)
class Tree:
pass
class Leaf(Tree):
def __init__(self, idx, name, mu):
self.idx = idx
self.name = name
self.mu = mu
def __repr__(self):
return "Leaf(" + repr(self.idx) + "_" + self.name + ")"
def __call__(self, X):
return self.mu(X[:, self.idx]) # apply the membership function to the specific feature idx
class Inner(Tree):
def __init__(self, aggregation, branches):
self.branches_ = branches
self.aggregation_ = aggregation
def __repr__(self):
return "(" + repr(self.aggregation_.__name__) + ", " + ", ".join([ repr(x) for x in self.branches_ ]) + ")"
def __call__(self, X):
# output for each branches
R = np.zeros((X.shape[0], len(self.branches_)))
for idx, branch in enumerate(self.branches_):
R[:, idx] = branch(X)
return self.aggregation_(R)
class FuzzyPatternTreeClassifier(BaseEstimator, ClassifierMixin):
"""Fuzzy pattern tree classifier"""
def __init__(self,
similarity_measure=default_rmse,
max_depth=5,
num_candidates=2,
num_slaves=3,
fuzzifier=default_fuzzifier):
"""Construct classifier
Params
------
similarity_measure : similarity measure to use (default default_rmse)
max_depth : max depth of tree (default 5)
num_candidates : number of candidates (default 2)
num_slaves : number of slaves (default 3)
fuzzifier : fuzzifier to fuzzify input (default: default_fuzzifier)
"""
self.similarity_measure = similarity_measure
self.max_depth = max_depth
self.num_candidates = num_candidates
self.num_slaves = num_slaves
self.fuzzifier = fuzzifier
def get_params(self, deep=True):
return {"similarity_measure": self.similarity_measure,
"max_depth": self.max_depth,
"num_candidates": self.num_candidates,
"num_slaves": self.num_slaves,
"fuzzifier": self.fuzzifier}
def set_params(self, **params):
for key, value in params.items():
setattr(self, key, value)
return self
def fit(self, X, y):
X = check_array(X)
self.classes_, y = np.unique(y, return_inverse=True)
if np.nan in self.classes_:
raise Exception("nan not supported for class values")
self.trees_ = {}
# build membership functions
P = []
for feature_idx, feature in enumerate(X.T):
P.extend(self.fuzzifier(feature_idx, feature))
# build the pattern tree for each class
for class_idx, class_value in enumerate(self.classes_):
class_vector = np.zeros(len(y))
class_vector[y == class_idx] = 1.0
root = self.build_for_class(X, y, class_vector, list(P))
self.trees_[class_idx] = root
return self
def build_for_class(self, X, y, class_vector, P):
S = []
C = _select_candidates(P, self.num_candidates, class_vector, self.similarity_measure, X)
for depth in range(self.max_depth):
P_U_S = list(P)
P_U_S.extend([ s[1] for s in S ])
new_candidates = self.select_slaves(C, P_U_S, class_vector, X)
# no new candidates found
if len(new_candidates) == 0:
break
S.extend(new_candidates)
# no better similarity received
if new_candidates[0][0] < C[0][0]:
break
# clean out primitive trees
for s in S:
P = [ p for p in P if not _tree_contains(s[1], p) ]
# remove primitives already in candidates
for c in new_candidates:
P = [ p for p in P if not _tree_contains(c[1], p) ]
C = new_candidates
# first candidates
return C[0][1]
def select_slaves(self, candidates, P_U_S, class_vector, X):
R = []
for candidate in candidates:
aggregates = []
for other in P_U_S:
if not _tree_contains(candidate[1], other):
aggregates.extend([ Inner(a, [ candidate[1], other ]) for a in OPERATORS ])
R.extend(_select_candidates(aggregates, self.num_slaves, class_vector, self.similarity_measure, X))
R = sorted(R, key=lambda x: x[0])
RR = []
used_nodes = set()
for candidate in R:
inner_node = candidate[1]
found = False
for tree in inner_node.branches_:
if tree in used_nodes:
found = True
if not found:
used_nodes.update(inner_node.branches_)
RR.append(candidate)
return heapq.nlargest(self.num_slaves, RR, key=lambda x: x[0])
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : Array-like of shape [n_samples, n_features]
The input to classify.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
X = check_array(X)
if self.trees_ is None:
raise Exception("Pattern trees not initialized. Perform a fit first.")
y_classes = np.zeros((X.shape[0], len(self.classes_)))
for i, c in enumerate(self.classes_):
y_classes[:, i] = self.trees_[i](X)
# predict the maximum value
return self.classes_.take(np.argmax(y_classes, -1))
class FuzzyPatternTreeTopDownClassifier(FuzzyPatternTreeClassifier):
"""
Fuzzy Pattern Tree with Top Down induction algorithm.
"""
def __init__(self,
similarity_measure=default_rmse,
relative_improvement=0.01,
num_candidates=5,
fuzzifier=default_fuzzifier):
self.similarity_measure = similarity_measure
self.relative_improvement = relative_improvement
self.num_candidates = num_candidates
self.fuzzifier = fuzzifier
def get_params(self, deep=True):
return {"similarity_measure": self.similarity_measure,
"relative_improvement": self.relative_improvement,
"num_candidates": self.num_candidates,
"fuzzifier": self.fuzzifier}
def select_slaves(self, C, P, class_vector, num_candidates, X):
R = []
for candidate in C:
c = candidate[1]
modified = []
candidate_leaves = _tree_leaves(c)
for c_leaf in candidate_leaves:
for p_leaf in [ p for p in P if p not in candidate_leaves ]:
for aggr in OPERATORS:
modified.append(_tree_clone_replace_leaf(c, c_leaf, Inner(aggr, [ c_leaf, p_leaf ])))
R.extend(_select_candidates(modified, self.num_candidates, class_vector, self.similarity_measure, X))
R = list(heapq.nlargest(self.num_candidates, R, key=lambda x: x[0]))
return list(reversed(sorted(R, key=lambda x: x[0])))
def build_for_class(self, X, y, class_vector, P):
C = _select_candidates(P, self.num_candidates, class_vector, self.similarity_measure, X)
C = sorted(C, key=lambda x: x[0])
while True:
if C[0][0] == 1.0:
break
new_candidates = self.select_slaves(C, P, class_vector, self.num_candidates, X)
if len(new_candidates) == 0:
break
if new_candidates[0][0] < (1.0 + self.relative_improvement) * C[0][0]:
break
C = new_candidates
return C[0][1]
|
mit
|
gdetor/SI-RF-Structure
|
SOMLearning/DNF-2D-SOM-REF.py
|
1
|
7652
|
# Copyright (c) 2014, Georgios Is. Detorakis ([email protected]) and
# Nicolas P. Rougier ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompany the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# DNF-2D-SOM-REF.py generates the topographic maps of area 3b as it is
# described in [1].
import math as mt
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
from numpy.fft import rfft2, irfft2, ifftshift
rc('text', usetex=True)
rc('font', family='serif')
# Receptors regular grid. Jitter can be added.
def grid(n, xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0, noise=0.0):
x = np.linspace(xmin, xmax, n, endpoint=False)
y = np.linspace(ymin, ymax, n, endpoint=False)
X, Y = np.meshgrid(x, y)
X += np.random.uniform(-noise, noise, (n, n))
X = np.mod(X+1, 1)
Y += np.random.uniform(-noise, noise, (n, n))
Y = np.mod(Y+1, 1)
return X.ravel(), Y.ravel()
def g(x, sigma=1.0):
return np.exp(-0.5*(x/sigma)**2)
def prettyfloat(float):
return '%.3f' % float
def print_parameters(n, Rn, Ke, sigma_e, Ki, sigma_i, T, dt, tau, R_noise,
epochs):
print 'Net size: ', n, 'x', n, 'x', Rn, 'x', Rn
print 'Ke:', prettyfloat(Ke), 'sigma_e:', prettyfloat(sigma_e)
print 'Ki:', prettyfloat(Ki), 'sigma_i:', prettyfloat(sigma_i)
print 'Time:', prettyfloat(T), 'dt:', prettyfloat(dt)
print 'tau:', prettyfloat(tau)
print 'Noise:', prettyfloat(R_noise), 'Epochs:', epochs
def plot_activity(data):
plt.cla(), plt.clf()
plt.imshow(data, interpolation='nearest', cmap=plt.cm.jet)
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.draw()
def activity_size(data, th):
return sum(1 for i in data.flatten() if i > th)
if __name__ == '__main__':
np.random.seed(137)
# Parameters
# --------------------------------------------
Rn = 16 # Receptors count (Rn x Rn)
R_noise = 0.05 # Receptors placement noise
n = 32 # Neural field size (n x n)
T = 10.0 # 90.0 No of Euler's time discretization
ms = 0.001
dt = 100.0 * ms
lrate = 0.4 # 0.005 Learning rate
alpha = 0.1 # Time constant
tau = 1.00 # Synapse temporal decay
epochs = 35000 # Number of training epochs
W_min, W_max = 0.00, 1.00 # Weights min/max values for initialization
Ke = 960.0/(n*n) * 3.72 # Strength of lateral excitatory weights
sigma_e = 0.1 # Extent of lateral excitatory weights
Ki = 960.0/(n*n) * 2.40 # Strength of lateral inhibitory weights
sigma_i = 1.0 # Extent of lateral inhibitory weights
# Neural field setup
# --------------------------------------------
U = np.random.uniform(0.00, 0.01, (n, n))
V = np.random.uniform(0.00, 0.01, (n, n))
# You have to replace this by something different!!!
folder_o = '/home/Local/SOM/Parameters/75Noise/'
W = np.random.uniform(W_min, W_max, (n*n, Rn*Rn))
# FFT implementation
# --------------------------------------------
mean = 0.5
x_inf, x_sup, y_inf, y_sup = 0.0, 1.0, 0.0, 1.0
X, Y = np.meshgrid(np.linspace(x_inf, x_sup, n+1)[1:],
np.linspace(y_inf, y_sup, n+1)[1:])
Dist = np.sqrt((X-mean)**2 + (Y-mean)**2)
We = Ke * g(Dist, sigma_e) * alpha
Wi = Ki * g(Dist, sigma_i) * alpha
print_parameters(n, Rn, Ke, sigma_e, Ki, sigma_i, T, dt, tau,
R_noise, epochs)
We_fft = rfft2(ifftshift(We[::-1, ::-1]))
Wi_fft = rfft2(ifftshift(Wi[::-1, ::-1]))
# Skin Receptors setup
# --------------------------------------------
R = np.zeros((Rn*Rn, 2))
R[:, 0], R[:, 1] = grid(Rn, noise=R_noise)
# np.save( folder_o+'gridxcoord', R[:,0] )
# np.save( folder_o+'gridycoord', R[:,1] )
# Samples generation
# --------------------------------------------
size = epochs
S = np.random.uniform(0, 1, (size, 2))
dX = np.abs(R[:, 0].reshape(1, Rn*Rn) - S[:, 0].reshape(size, 1))
dX = np.minimum(dX, 1-dX)
dY = np.abs(R[:, 1].reshape(1, Rn*Rn) - S[:, 1].reshape(size, 1))
dY = np.minimum(dY, 1-dY)
samples = np.sqrt(dX*dX+dY*dY)/mt.sqrt(2.0)
samples = g(samples, 0.08)
# Actual training
# --------------------------------------------
# plt.ion()
for e in range(epochs):
# Pick a sample
stimulus = samples[e]
# Computes field input accordingly
D = ((np.abs(W - stimulus)).sum(axis=-1))/float(Rn*Rn)
I = (1.0 - D.reshape(n, n)) * alpha
# Field simulation until convergence
for l in range(int(T/dt)):
Z = rfft2(V)
Le = irfft2(Z * We_fft, (n, n)).real
Li = irfft2(Z * Wi_fft, (n, n)).real
U += (-U + (Le - Li) + I) * tau * dt
V = np.maximum(U, 0.0)
# plot_activity(V)
# Learning
# --------
W -= lrate * (Le.ravel() * (W - stimulus).T).T
if e % 50 == 0:
print e
# np.save(folder_o+'weights'+str('%06d' % e), W)
# Field activity reset
# --------------------
U = np.random.uniform(0.00, 0.01, (n, n))
V = np.random.uniform(0.00, 0.01, (n, n))
# np.save(folder_o+'weights'+str('%06d' % epochs), W)
m = Rn
plt.figure(figsize=(10, 10)) # 13, 7
ax = plt.subplot(111, aspect=1)
R = np.zeros((n*m, n*m))
for j in range(n):
for i in range(n):
R[j*m:(j+1)*m, i*m:(i+1)*m] = W[j*n+i].reshape(m, m)
im = plt.imshow(R, interpolation='nearest', cmap=plt.cm.bone_r,
vmin=0, vmax=1)
plt.xticks(np.arange(0, n*m, m), [])
plt.yticks(np.arange(0, n*m, m), [])
plt.grid()
plt.show()
|
gpl-3.0
|
alekz112/statsmodels
|
statsmodels/sandbox/examples/try_quantile_regression1.py
|
33
|
1188
|
'''Example to illustrate Quantile Regression
Author: Josef Perktold
polynomial regression with systematic deviations above
'''
import numpy as np
from statsmodels.compat.python import zip
from scipy import stats
import statsmodels.api as sm
from statsmodels.regression.quantile_regression import QuantReg
sige = 0.1
nobs, k_vars = 500, 3
x = np.random.uniform(-1, 1, size=nobs)
x.sort()
exog = np.vander(x, k_vars+1)[:,::-1]
mix = 0.1 * stats.norm.pdf(x[:,None], loc=np.linspace(-0.5, 0.75, 4), scale=0.01).sum(1)
y = exog.sum(1) + mix + sige * (np.random.randn(nobs)/2 + 1)**3
p = 0.5
res_qr = QuantReg(y, exog).fit(p)
res_qr2 = QuantReg(y, exog).fit(0.1)
res_qr3 = QuantReg(y, exog).fit(0.75)
res_ols = sm.OLS(y, exog).fit()
params = [res_ols.params, res_qr2.params, res_qr.params, res_qr3.params]
labels = ['ols', 'qr 0.1', 'qr 0.5', 'qr 0.75']
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x, y, '.', alpha=0.5)
for lab, beta in zip(['ols', 'qr 0.1', 'qr 0.5', 'qr 0.75'], params):
print('%-8s'%lab, np.round(beta, 4))
fitted = np.dot(exog, beta)
lw = 2
plt.plot(x, fitted, lw=lw, label=lab)
plt.legend()
plt.title('Quantile Regression')
plt.show()
|
bsd-3-clause
|
ujfjhz/vnpy
|
docker/dockerTrader/ctaStrategy/strategy/strategyKingKeltner.py
|
5
|
11842
|
# encoding: UTF-8
"""
Trading strategy based on the King Keltner channel, suited to stock index futures.
It demonstrates OCO orders and the aggregation of 1-minute bars into 5-minute bars.
Notes:
1. The author makes no guarantee of trading profit; the strategy code is for reference only.
2. This strategy requires talib; users who have not installed it should first follow the tutorial on www.vnpy.org.
3. After importing IF0000_1min.csv into MongoDB with ctaHistoryData.py, run this file directly to backtest the strategy.
"""
from __future__ import division
from ..ctaBase import *
from ..ctaTemplate import CtaTemplate
import talib
import numpy as np
########################################################################
class KkStrategy(CtaTemplate):
"""基于King Keltner通道的交易策略"""
className = 'KkStrategy'
author = u'用Python的交易员'
# Strategy parameters
kkLength = 11 # window length for the channel mid line
kkDev = 1.6 # deviation multiplier for the channel width
trailingPrcnt = 0.8 # trailing stop percentage
initDays = 10 # number of days of data used for initialization
fixedSize = 1 # quantity traded per order
# Strategy variables
bar = None # 1-minute bar object
barMinute = EMPTY_STRING # current minute of the bar
fiveBar = None # 5-minute bar object
bufferSize = 100 # size of the data buffer
bufferCount = 0 # number of bars buffered so far
highArray = np.zeros(bufferSize) # array of bar highs
lowArray = np.zeros(bufferSize) # array of bar lows
closeArray = np.zeros(bufferSize) # array of bar closes
atrValue = 0 # latest ATR indicator value
kkMid = 0 # KK channel mid line
kkUp = 0 # KK channel upper band
kkDown = 0 # KK channel lower band
intraTradeHigh = 0 # highest price while the position is held
intraTradeLow = 0 # lowest price while the position is held
buyOrderID = None # order ID of the OCO buy-to-open stop order
shortOrderID = None # order ID of the OCO sell-to-open stop order
orderList = [] # list of working order IDs
# Parameter list, holding the names of the parameters
paramList = ['name',
'className',
'author',
'vtSymbol',
'kkLength',
'kkDev']
# Variable list, holding the names of the variables
varList = ['inited',
'trading',
'pos',
'atrValue',
'kkMid',
'kkUp',
'kkDown']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(KkStrategy, self).__init__(ctaEngine, setting)
#----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略初始化' %self.name)
# Load historical data and initialize strategy values by replaying it
initData = self.loadBar(self.initDays)
for bar in initData:
self.onBar(bar)
self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略启动' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略停止' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
# 聚合为1分钟K线
tickMinute = tick.datetime.minute
if tickMinute != self.barMinute:
if self.bar:
self.onBar(self.bar)
bar = CtaBarData()
bar.vtSymbol = tick.vtSymbol
bar.symbol = tick.symbol
bar.exchange = tick.exchange
bar.open = tick.lastPrice
bar.high = tick.lastPrice
bar.low = tick.lastPrice
bar.close = tick.lastPrice
bar.date = tick.date
bar.time = tick.time
bar.datetime = tick.datetime # the bar time is set to the time of its first tick
self.bar = bar # local reference to avoid an extra attribute lookup, for speed
self.barMinute = tickMinute # update the current minute
else: # otherwise keep accumulating into the current bar
bar = self.bar # same local-reference trick, again for speed
bar.high = max(bar.high, tick.lastPrice)
bar.low = min(bar.low, tick.lastPrice)
bar.close = tick.lastPrice
#----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
# 如果当前是一个5分钟走完
if bar.datetime.minute % 5 == 0:
# 如果已经有聚合5分钟K线
if self.fiveBar:
# 将最新分钟的数据更新到目前5分钟线中
fiveBar = self.fiveBar
fiveBar.high = max(fiveBar.high, bar.high)
fiveBar.low = min(fiveBar.low, bar.low)
fiveBar.close = bar.close
# 推送5分钟线数据
self.onFiveBar(fiveBar)
# 清空5分钟线数据缓存
self.fiveBar = None
else:
# 如果没有缓存则新建
if not self.fiveBar:
fiveBar = CtaBarData()
fiveBar.vtSymbol = bar.vtSymbol
fiveBar.symbol = bar.symbol
fiveBar.exchange = bar.exchange
fiveBar.open = bar.open
fiveBar.high = bar.high
fiveBar.low = bar.low
fiveBar.close = bar.close
fiveBar.date = bar.date
fiveBar.time = bar.time
fiveBar.datetime = bar.datetime
self.fiveBar = fiveBar
else:
fiveBar = self.fiveBar
fiveBar.high = max(fiveBar.high, bar.high)
fiveBar.low = min(fiveBar.low, bar.low)
fiveBar.close = bar.close
#----------------------------------------------------------------------
def onFiveBar(self, bar):
"""收到5分钟K线"""
# 撤销之前发出的尚未成交的委托(包括限价单和停止单)
for orderID in self.orderList:
self.cancelOrder(orderID)
self.orderList = []
# 保存K线数据
self.closeArray[0:self.bufferSize-1] = self.closeArray[1:self.bufferSize]
self.highArray[0:self.bufferSize-1] = self.highArray[1:self.bufferSize]
self.lowArray[0:self.bufferSize-1] = self.lowArray[1:self.bufferSize]
self.closeArray[-1] = bar.close
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
self.bufferCount += 1
if self.bufferCount < self.bufferSize:
return
        # Compute indicator values
self.atrValue = talib.ATR(self.highArray,
self.lowArray,
self.closeArray,
self.kkLength)[-1]
self.kkMid = talib.MA(self.closeArray, self.kkLength)[-1]
self.kkUp = self.kkMid + self.atrValue * self.kkDev
self.kkDown = self.kkMid - self.atrValue * self.kkDev
        # Decide whether to trade
        # No position held: send an OCO entry order
if self.pos == 0:
self.intraTradeHigh = bar.high
self.intraTradeLow = bar.low
self.sendOcoOrder(self.kkUp, self.kkDown, self.fixedSize)
        # Holding a long position
elif self.pos > 0:
self.intraTradeHigh = max(self.intraTradeHigh, bar.high)
self.intraTradeLow = bar.low
orderID = self.sell(self.intraTradeHigh*(1-self.trailingPrcnt/100),
abs(self.pos), True)
self.orderList.append(orderID)
        # Holding a short position
elif self.pos < 0:
self.intraTradeHigh = bar.high
self.intraTradeLow = min(self.intraTradeLow, bar.low)
orderID = self.cover(self.intraTradeLow*(1+self.trailingPrcnt/100),
abs(self.pos), True)
self.orderList.append(orderID)
        # Emit a status update event
self.putEvent()
#----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
pass
#----------------------------------------------------------------------
def onTrade(self, trade):
        # After a long entry is filled, cancel the short order
if self.pos > 0:
self.cancelOrder(self.shortOrderID)
if self.buyOrderID in self.orderList:
self.orderList.remove(self.buyOrderID)
if self.shortOrderID in self.orderList:
self.orderList.remove(self.shortOrderID)
        # And vice versa for a short entry
elif self.pos < 0:
self.cancelOrder(self.buyOrderID)
if self.buyOrderID in self.orderList:
self.orderList.remove(self.buyOrderID)
if self.shortOrderID in self.orderList:
self.orderList.remove(self.shortOrderID)
        # Emit a status update event
self.putEvent()
#----------------------------------------------------------------------
def sendOcoOrder(self, buyPrice, shortPrice, volume):
"""
发送OCO委托
OCO(One Cancel Other)委托:
1. 主要用于实现区间突破入场
2. 包含两个方向相反的停止单
3. 一个方向的停止单成交后会立即撤消另一个方向的
"""
        # Send stop orders on both sides and record the order IDs
self.buyOrderID = self.buy(buyPrice, volume, True)
self.shortOrderID = self.short(shortPrice, volume, True)
        # Record the order IDs in the list
self.orderList.append(self.buyOrderID)
self.orderList.append(self.shortOrderID)
if __name__ == '__main__':
    # Allows backtesting by double-clicking (running) this file directly
    # PyQt4 is imported to make sure matplotlib uses PyQt4 instead of PySide, preventing initialization errors
    from ctaBacktesting import *
    from PyQt4 import QtCore, QtGui
    # Create the backtesting engine
    engine = BacktestingEngine()
    # Set the engine's backtesting mode to bar mode
    engine.setBacktestingMode(engine.BAR_MODE)
    # Set the start date of the backtest data
    engine.setStartDate('20130101')
    # Set product-related parameters
    engine.setSlippage(0.2)     # 1 price tick of slippage for stock index futures
    engine.setRate(0.3/10000)   # Commission rate of 0.3 per 10,000 (0.003%)
    engine.setSize(300)         # Contract multiplier for stock index futures
    engine.setPriceTick(0.2)    # Minimum price tick for stock index futures
    # Set the historical database to use
    engine.setDatabase(MINUTE_DB_NAME, 'IF0000')
    # Create the strategy object in the engine
    d = {}
    engine.initStrategy(KkStrategy, d)
    # Run the backtest
    engine.runBacktesting()
    # Show the backtest results
engine.showBacktestingResult()
|
mit
|
zentol/flink
|
flink-python/pyflink/fn_execution/coders.py
|
6
|
17774
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC
import datetime
import decimal
import pyarrow as pa
import pytz
from apache_beam.coders import Coder
from apache_beam.coders.coders import FastCoder, LengthPrefixCoder
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.portability import common_urns
from apache_beam.typehints import typehints
from pyflink.fn_execution import coder_impl as slow_coder_impl
try:
from pyflink.fn_execution import fast_coder_impl as coder_impl
except ImportError:
coder_impl = slow_coder_impl
from pyflink.fn_execution import flink_fn_execution_pb2
from pyflink.fn_execution.sdk_worker_main import pipeline_options
from pyflink.table.types import Row, TinyIntType, SmallIntType, IntType, BigIntType, BooleanType, \
FloatType, DoubleType, VarCharType, VarBinaryType, DecimalType, DateType, TimeType, \
LocalZonedTimestampType, RowType, RowField, to_arrow_type, TimestampType, ArrayType
FLINK_SCALAR_FUNCTION_SCHEMA_CODER_URN = "flink:coder:schema:scalar_function:v1"
FLINK_TABLE_FUNCTION_SCHEMA_CODER_URN = "flink:coder:schema:table_function:v1"
FLINK_SCALAR_FUNCTION_SCHEMA_ARROW_CODER_URN = "flink:coder:schema:scalar_function:arrow:v1"
__all__ = ['FlattenRowCoder', 'RowCoder', 'BigIntCoder', 'TinyIntCoder', 'BooleanCoder',
'SmallIntCoder', 'IntCoder', 'FloatCoder', 'DoubleCoder',
'BinaryCoder', 'CharCoder', 'DateCoder', 'TimeCoder',
'TimestampCoder', 'ArrayCoder', 'MapCoder', 'DecimalCoder', 'ArrowCoder']
class TableFunctionRowCoder(FastCoder):
"""
Coder for Table Function Row.
"""
def __init__(self, flatten_row_coder):
self._flatten_row_coder = flatten_row_coder
def _create_impl(self):
return coder_impl.TableFunctionRowCoderImpl(self._flatten_row_coder.get_impl())
def to_type_hint(self):
return typehints.List
@Coder.register_urn(FLINK_TABLE_FUNCTION_SCHEMA_CODER_URN, flink_fn_execution_pb2.Schema)
def _pickle_from_runner_api_parameter(schema_proto, unused_components, unused_context):
return TableFunctionRowCoder(FlattenRowCoder([from_proto(f.type)
for f in schema_proto.fields]))
def __repr__(self):
return 'TableFunctionRowCoder[%s]' % repr(self._flatten_row_coder)
def __eq__(self, other):
return (self.__class__ == other.__class__
and self._flatten_row_coder == other._flatten_row_coder)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._flatten_row_coder)
class FlattenRowCoder(FastCoder):
"""
Coder for Row. The decoded result will be flattened as a list of column values of a row instead
of a row object.
"""
def __init__(self, field_coders):
self._field_coders = field_coders
def _create_impl(self):
return coder_impl.FlattenRowCoderImpl([c.get_impl() for c in self._field_coders])
def is_deterministic(self):
return all(c.is_deterministic() for c in self._field_coders)
def to_type_hint(self):
return typehints.List
@Coder.register_urn(FLINK_SCALAR_FUNCTION_SCHEMA_CODER_URN, flink_fn_execution_pb2.Schema)
def _pickle_from_runner_api_parameter(schema_proto, unused_components, unused_context):
return FlattenRowCoder([from_proto(f.type) for f in schema_proto.fields])
def __repr__(self):
return 'FlattenRowCoder[%s]' % ', '.join(str(c) for c in self._field_coders)
def __eq__(self, other):
        return (self.__class__ == other.__class__
                and len(self._field_coders) == len(other._field_coders)
                and all(self._field_coders[i] == other._field_coders[i]
                        for i in range(len(self._field_coders))))
def __ne__(self, other):
return not self == other
def __hash__(self):
        return hash(tuple(self._field_coders))
class RowCoder(FlattenRowCoder):
"""
Coder for Row.
"""
def __init__(self, field_coders):
super(RowCoder, self).__init__(field_coders)
def _create_impl(self):
return coder_impl.RowCoderImpl([c.get_impl() for c in self._field_coders])
def get_impl(self):
return self._create_impl()
def to_type_hint(self):
return Row
def __repr__(self):
return 'RowCoder[%s]' % ', '.join(str(c) for c in self._field_coders)
class CollectionCoder(FastCoder):
"""
Base coder for collection.
"""
def __init__(self, elem_coder):
self._elem_coder = elem_coder
def _create_impl(self):
raise NotImplementedError
def get_impl(self):
return self._create_impl()
def is_deterministic(self):
return self._elem_coder.is_deterministic()
def to_type_hint(self):
return []
def __eq__(self, other):
return (self.__class__ == other.__class__
and self._elem_coder == other._elem_coder)
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, repr(self._elem_coder))
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._elem_coder)
class ArrayCoder(CollectionCoder):
"""
Coder for Array.
"""
def __init__(self, elem_coder):
self._elem_coder = elem_coder
super(ArrayCoder, self).__init__(elem_coder)
def _create_impl(self):
return coder_impl.ArrayCoderImpl(self._elem_coder.get_impl())
class MapCoder(FastCoder):
"""
Coder for Map.
"""
def __init__(self, key_coder, value_coder):
self._key_coder = key_coder
self._value_coder = value_coder
def _create_impl(self):
return coder_impl.MapCoderImpl(self._key_coder.get_impl(), self._value_coder.get_impl())
def get_impl(self):
return self._create_impl()
def is_deterministic(self):
return self._key_coder.is_deterministic() and self._value_coder.is_deterministic()
def to_type_hint(self):
return {}
def __repr__(self):
return 'MapCoder[%s]' % ','.join([repr(self._key_coder), repr(self._value_coder)])
def __eq__(self, other):
return (self.__class__ == other.__class__
and self._key_coder == other._key_coder
and self._value_coder == other._value_coder)
def __ne__(self, other):
return not self == other
def __hash__(self):
        return hash((self._key_coder, self._value_coder))
class DeterministicCoder(FastCoder, ABC):
"""
Base Coder for all deterministic Coders.
"""
def is_deterministic(self):
return True
def get_impl(self):
return self._create_impl()
class BigIntCoder(DeterministicCoder):
"""
Coder for 8 bytes long.
"""
def _create_impl(self):
return coder_impl.BigIntCoderImpl()
def to_type_hint(self):
return int
class TinyIntCoder(DeterministicCoder):
"""
Coder for Byte.
"""
def _create_impl(self):
return coder_impl.TinyIntCoderImpl()
def to_type_hint(self):
return int
class BooleanCoder(DeterministicCoder):
"""
Coder for Boolean.
"""
def _create_impl(self):
return coder_impl.BooleanCoderImpl()
def to_type_hint(self):
return bool
class SmallIntCoder(DeterministicCoder):
"""
Coder for Short.
"""
def _create_impl(self):
return coder_impl.SmallIntCoderImpl()
def to_type_hint(self):
return int
class IntCoder(DeterministicCoder):
"""
Coder for 4 bytes int.
"""
def _create_impl(self):
return coder_impl.IntCoderImpl()
def to_type_hint(self):
return int
class FloatCoder(DeterministicCoder):
"""
Coder for Float.
"""
def _create_impl(self):
return coder_impl.FloatCoderImpl()
def to_type_hint(self):
return float
class DoubleCoder(DeterministicCoder):
"""
Coder for Double.
"""
def _create_impl(self):
return coder_impl.DoubleCoderImpl()
def to_type_hint(self):
return float
class DecimalCoder(DeterministicCoder):
"""
Coder for Decimal.
"""
def __init__(self, precision, scale):
self.precision = precision
self.scale = scale
def _create_impl(self):
return coder_impl.DecimalCoderImpl(self.precision, self.scale)
def to_type_hint(self):
return decimal.Decimal
class BinaryCoder(DeterministicCoder):
"""
Coder for Byte Array.
"""
def _create_impl(self):
return coder_impl.BinaryCoderImpl()
def to_type_hint(self):
return bytes
class CharCoder(DeterministicCoder):
"""
Coder for Character String.
"""
def _create_impl(self):
return coder_impl.CharCoderImpl()
def to_type_hint(self):
return str
class DateCoder(DeterministicCoder):
"""
Coder for Date
"""
def _create_impl(self):
return coder_impl.DateCoderImpl()
def to_type_hint(self):
return datetime.date
class TimeCoder(DeterministicCoder):
"""
Coder for Time.
"""
def _create_impl(self):
return coder_impl.TimeCoderImpl()
def to_type_hint(self):
return datetime.time
class TimestampCoder(DeterministicCoder):
"""
Coder for Timestamp.
"""
def __init__(self, precision):
self.precision = precision
def _create_impl(self):
return coder_impl.TimestampCoderImpl(self.precision)
def to_type_hint(self):
return datetime.datetime
class LocalZonedTimestampCoder(DeterministicCoder):
"""
Coder for LocalZonedTimestamp.
"""
def __init__(self, precision, timezone):
self.precision = precision
self.timezone = timezone
def _create_impl(self):
return coder_impl.LocalZonedTimestampCoderImpl(self.precision, self.timezone)
def to_type_hint(self):
return datetime.datetime
class ArrowCoder(DeterministicCoder):
"""
Coder for Arrow.
"""
def __init__(self, schema, row_type, timezone):
self._schema = schema
self._row_type = row_type
self._timezone = timezone
def _create_impl(self):
return slow_coder_impl.ArrowCoderImpl(self._schema, self._row_type, self._timezone)
def to_type_hint(self):
import pandas as pd
return pd.Series
@Coder.register_urn(FLINK_SCALAR_FUNCTION_SCHEMA_ARROW_CODER_URN,
flink_fn_execution_pb2.Schema)
def _pickle_from_runner_api_parameter(schema_proto, unused_components, unused_context):
def _to_arrow_schema(row_type):
return pa.schema([pa.field(n, to_arrow_type(t), t._nullable)
for n, t in zip(row_type.field_names(), row_type.field_types())])
def _to_data_type(field_type):
if field_type.type_name == flink_fn_execution_pb2.Schema.TINYINT:
return TinyIntType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.SMALLINT:
return SmallIntType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.INT:
return IntType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.BIGINT:
return BigIntType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.BOOLEAN:
return BooleanType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.FLOAT:
return FloatType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.DOUBLE:
return DoubleType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.VARCHAR:
return VarCharType(0x7fffffff, field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.VARBINARY:
return VarBinaryType(0x7fffffff, field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.DECIMAL:
return DecimalType(field_type.decimal_info.precision,
field_type.decimal_info.scale,
field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.DATE:
return DateType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.TIME:
return TimeType(field_type.time_info.precision, field_type.nullable)
elif field_type.type_name == \
flink_fn_execution_pb2.Schema.LOCAL_ZONED_TIMESTAMP:
return LocalZonedTimestampType(field_type.local_zoned_timestamp_info.precision,
field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.TIMESTAMP:
return TimestampType(field_type.timestamp_info.precision, field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.ARRAY:
return ArrayType(_to_data_type(field_type.collection_element_type),
field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.TypeName.ROW:
return RowType(
[RowField(f.name, _to_data_type(f.type), f.description)
for f in field_type.row_schema.fields], field_type.nullable)
else:
raise ValueError("field_type %s is not supported." % field_type)
def _to_row_type(row_schema):
return RowType([RowField(f.name, _to_data_type(f.type)) for f in row_schema.fields])
timezone = pytz.timezone(pipeline_options.view_as(DebugOptions).lookup_experiment(
"table.exec.timezone"))
row_type = _to_row_type(schema_proto)
return ArrowCoder(_to_arrow_schema(row_type), row_type, timezone)
def __repr__(self):
return 'ArrowCoder[%s]' % self._schema
class PassThroughLengthPrefixCoder(LengthPrefixCoder):
"""
    Coder which doesn't prefix the length of the encoded object, as the length prefix will be
    handled by the wrapped value coder.
"""
def __init__(self, value_coder):
super(PassThroughLengthPrefixCoder, self).__init__(value_coder)
def _create_impl(self):
return coder_impl.PassThroughLengthPrefixCoderImpl(self._value_coder.get_impl())
def __repr__(self):
return 'PassThroughLengthPrefixCoder[%s]' % self._value_coder
Coder.register_structured_urn(
common_urns.coders.LENGTH_PREFIX.urn, PassThroughLengthPrefixCoder)
type_name = flink_fn_execution_pb2.Schema
_type_name_mappings = {
type_name.TINYINT: TinyIntCoder(),
type_name.SMALLINT: SmallIntCoder(),
type_name.INT: IntCoder(),
type_name.BIGINT: BigIntCoder(),
type_name.BOOLEAN: BooleanCoder(),
type_name.FLOAT: FloatCoder(),
type_name.DOUBLE: DoubleCoder(),
type_name.BINARY: BinaryCoder(),
type_name.VARBINARY: BinaryCoder(),
type_name.CHAR: CharCoder(),
type_name.VARCHAR: CharCoder(),
type_name.DATE: DateCoder(),
type_name.TIME: TimeCoder(),
}
def from_proto(field_type):
"""
Creates the corresponding :class:`Coder` given the protocol representation of the field type.
:param field_type: the protocol representation of the field type
:return: :class:`Coder`
"""
field_type_name = field_type.type_name
coder = _type_name_mappings.get(field_type_name)
if coder is not None:
return coder
if field_type_name == type_name.ROW:
return RowCoder([from_proto(f.type) for f in field_type.row_schema.fields])
if field_type_name == type_name.TIMESTAMP:
return TimestampCoder(field_type.timestamp_info.precision)
if field_type_name == type_name.LOCAL_ZONED_TIMESTAMP:
timezone = pytz.timezone(pipeline_options.view_as(DebugOptions).lookup_experiment(
"table.exec.timezone"))
return LocalZonedTimestampCoder(field_type.local_zoned_timestamp_info.precision, timezone)
elif field_type_name == type_name.ARRAY:
return ArrayCoder(from_proto(field_type.collection_element_type))
elif field_type_name == type_name.MAP:
return MapCoder(from_proto(field_type.map_info.key_type),
from_proto(field_type.map_info.value_type))
elif field_type_name == type_name.DECIMAL:
return DecimalCoder(field_type.decimal_info.precision,
field_type.decimal_info.scale)
else:
raise ValueError("field_type %s is not supported." % field_type)
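# --- Editor's sketch (not part of the original module) ---
# A minimal, hand-assembled example of the composite coders defined above, using
# only classes from this file. Real pipelines build the same structure from the
# protobuf schema via from_proto() instead of constructing it by hand.
def _example_build_row_coders():
    field_coders = [BigIntCoder(), CharCoder(), ArrayCoder(DoubleCoder())]
    flatten_coder = FlattenRowCoder(field_coders)  # decodes to a flat list of column values
    row_coder = RowCoder(field_coders)             # decodes to a Row object
    return flatten_coder, row_coder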
|
apache-2.0
|
subodhchhabra/pandashells
|
pandashells/test/p_plot_test.py
|
3
|
1066
|
#! /usr/bin/env python
from mock import patch, MagicMock
from unittest import TestCase
from pandashells.bin.p_plot import main
class MainTests(TestCase):
@patch('pandashells.bin.p_plot.argparse.ArgumentParser')
@patch('pandashells.bin.p_plot.arg_lib.add_args')
@patch('pandashells.bin.p_plot.io_lib.df_from_input')
@patch('pandashells.bin.p_plot.plot_lib.set_plot_styling')
@patch('pandashells.bin.p_plot.plot_lib.draw_xy_plot')
def test_plotting(
self, draw_xy_mock, set_plot_styling_mock, df_from_input_mock,
add_args_mock, ArgumentParserMock):
args = MagicMock()
parser = MagicMock(parse_args=MagicMock(return_value=args))
ArgumentParserMock.return_value = parser
df_from_input_mock.return_value = 'df'
main()
add_args_mock.assert_called_with(
parser, 'io_in', 'xy_plotting', 'decorating')
df_from_input_mock.assert_called_with(args)
set_plot_styling_mock.assert_called_with(args)
draw_xy_mock.assert_called_with(args, 'df')
|
bsd-2-clause
|
deepesch/scikit-learn
|
sklearn/linear_model/tests/test_omp.py
|
272
|
7752
|
# Author: Vlad Niculae
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
|
bsd-3-clause
|
kinverarity1/emdpler_wrapper
|
emdpler_wrapper/emdpler.py
|
1
|
7509
|
import logging
import os
import shutil
import subprocess
import tempfile
import textwrap
import fortranformat as ff
import numpy
logger = logging.getLogger(__name__)
INPUT_TEMPLATE = """
DIPOLE CHARACTERISTIC PARAMETERS:
IFACT(Without-1/With-2 Displacement Current Factor) Format(I5)
{calc_disp_currs}
IDIPOL(VMD-1,HMD-2,HED-3)--ICOMP(Hr/Hx-1,Ephai/Hy-2,Hz-3,Ex-4,Ey-5) Format(2I5)
{IDIPOL}{ICOMP}
R(S-R Offset)--HT(Source Height)--Z(Receiver Level)(Format(3F9.2)
{src_rec_offset}{src_height}{rec_level}
FREQ1(Highest Freq.)------FREQL(Lowest Freq) ---Format(2F12.2)
{freq_h}{freq_l}
RI(Current-Ampere)-Area(Dipole Area)-RM(Dipole Moment)-Format(3F9.2)
{rec_curr}{rec_area}{rec_dip_moment}
X (X- HMD & HED)--Y (Y- HMD & HED)--(Receiver position w.r.t. Dipole)--Format(2F9.3)
{hx}{hy}
MODEL PARAMETERS:
NLYR-------Resistivity--and---Thickness----Format(10F8.3)
{nlyr}
{res}{thk}
"""[1:-1]
class results(dict):
def __init__(self, *args, **kwargs):
self.__dict__ = self
def vmd(src_rec_offset, src_height, rec_level,
res, thk=None,
nlayers=None,
freq_h=1e5, freq_l=10,
rec_curr=1, rec_area=1, rec_dip_moment=1,
hx=0, hy=0,
field_components=("Hz", ), calc_displ_currs=False,
emdpler_exe=None, print_input_files=False, print_output=False):
"""Run forward model for vertical magnetic dipole configuration (VMD).
Arguments:
        src_rec_offset (float): source-receiver offset
        src_height (float): height of the source
        rec_level (float): level of the receiver
        res (array of floats): list of N resistivities for N model layers
        thk (array of floats): list of N-1 thicknesses for the first N-1 layers
            (the last resistivity applies to the underlying halfspace)
        field_components (list of strings): field components to calculate,
            can be a list containing any number of the values
            "Hz" (more to follow in the future).
        calc_displ_currs (bool): include displacement currents
        emdpler_exe (string): path to the emdpler executable
"""
if emdpler_exe is None:
suffix = ""
if os.name == "nt":
suffix = ".exe"
emdpler_exe = os.path.join(os.path.dirname(__file__), "emdpler" + suffix)
assert os.path.isfile(emdpler_exe)
IFACT = {True: 2, False: 1}[calc_displ_currs]
if nlayers is None:
nlayers = len(res)
if thk is None:
thk = []
# TODO: loop and allow multiple runs of Emdpler to calculate more field components.
ICOMP = {"Hz": 3}[field_components[0]]
temp_dir = tempfile.mkdtemp(prefix="tmp_emdpler")
logger.info("Running modelling in %s" % temp_dir)
logger.debug("Creating input file...")
input_template = str(INPUT_TEMPLATE)
res_sl = ["%f" % r for r in res]
res_s = textwrap.wrap(" ".join(res_sl))
input_template = input_template.format(
calc_disp_currs = "%.0f" % IFACT,
IDIPOL = "1",
ICOMP = ff.FortranRecordWriter('(2I5)').write([ICOMP]),
src_rec_offset = ff.FortranRecordWriter('(3F9.2)').write([src_rec_offset]),
src_height = ff.FortranRecordWriter('(3F9.2)').write([src_height]),
rec_level = ff.FortranRecordWriter('(3F9.2)').write([rec_level]),
freq_h = ff.FortranRecordWriter('(2F12.2)').write([freq_h]),
freq_l = ff.FortranRecordWriter('(2F12.2)').write([freq_l]),
rec_curr = ff.FortranRecordWriter('(3F9.2)').write([rec_curr]),
rec_area = ff.FortranRecordWriter('(3F9.2)').write([rec_area]),
rec_dip_moment = ff.FortranRecordWriter('(3F9.2)').write([rec_dip_moment]),
hx = ff.FortranRecordWriter('(2F9.3)').write([hx]),
hy = ff.FortranRecordWriter('(2F9.3)').write([hy]),
nlyr = ff.FortranRecordWriter('(2I5)').write([nlayers]),
res = "\n".join(textwrap.wrap(" ".join([ff.FortranRecordWriter('(10F8.3)').write([r]) for r in res]))),
thk = "\n".join(textwrap.wrap(" ".join([ff.FortranRecordWriter('(10F8.3)').write([t]) for t in thk]))),
)
input_fn = os.path.join(temp_dir, "Input.in")
with open(input_fn, mode="w") as inf:
inf.write(input_template)
logger.debug("Wrote input file at " + input_fn)
if print_input_files:
print input_template
try:
pr_output = subprocess.check_output([emdpler_exe], cwd=temp_dir)
if print_output:
print pr_output
except:
raise
finally:
r1 = numpy.loadtxt(os.path.join(temp_dir, "RESULT1.DAT"))
r2 = numpy.loadtxt(os.path.join(temp_dir, "RESULT2.DAT"))
r3 = numpy.loadtxt(os.path.join(temp_dir, "RESULT3.DAT"))
shutil.rmtree(temp_dir)
logger.info("Finished modelling in %s" % temp_dir)
rfreq = results()
rindn = results()
assert (r1[:,0] == r3[:, 0]).all()
rfreq.freq = r1[:,0]
rfreq.ampl = r1[:,1]
rfreq.phase = fix_phases(r1[:,2])
rfreq.norm_ampl = r3[:,1]
rfreq.norm_phase = fix_phases(r3[:,2])
rindn.ind_num = r2[:,0]
rindn.ampl = r2[:,1]
rindn.phase = fix_phases(r2[:,2])
return rfreq, rindn
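# --- Editor's sketch (not part of the original module) ---
# Minimal illustration of how vmd() above can be driven. The three-layer model and
# the geometry values are hypothetical, and the bundled emdpler executable is
# assumed to be available next to this module.
def _example_vmd_run():
    rfreq, rindn = vmd(src_rec_offset=100.0,        # source-receiver offset [m]
                       src_height=0.0,
                       rec_level=0.0,
                       res=[100.0, 10.0, 1000.0],   # layer resistivities [ohm.m]
                       thk=[20.0, 50.0],            # thicknesses of the first two layers [m]
                       freq_h=1e5, freq_l=10.0)
    plot_results(rfreq, rindn, fign=1)
    return rfreq, rindn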
def plot_results(rfreq, rindn, fig=None, fign=None, figsize=(15, 6),
amplim=(None, None), phaselim=(None, None), gskws={}, pltkws={}):
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
if fig is None:
fig = plt.figure(fign, figsize=figsize)
pltkws["color"] = pltkws.get("color", "k")
gskws["wspace"] = gskws.get("wspace", 0.3)
gskws["hspace"] = gskws.get("hspace", 0.3)
gs = gridspec.GridSpec(2, 3, **gskws)
ax = fig.add_subplot(gs[0])
ax.plot(rfreq.freq, rfreq.ampl, **pltkws)
ax.set_xscale("log")
ax.set_xlim(*ax.get_xlim()[::-1])
ax.set_ylim(*amplim)
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Amplitude")
ax.yaxis.get_major_formatter().set_powerlimits((-2, 3))
ax = fig.add_subplot(gs[3])
ax.plot(rfreq.freq, rfreq.phase, **pltkws)
ax.set_xscale("log")
ax.set_xlim(*ax.get_xlim()[::-1])
ax.set_ylim(*phaselim)
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Phase")
ax.yaxis.get_major_formatter().set_powerlimits((-2, 3))
ax = fig.add_subplot(gs[1])
ax.plot(rfreq.freq, rfreq.norm_ampl, **pltkws)
ax.set_xscale("log")
ax.set_xlim(*ax.get_xlim()[::-1])
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Normalized amplitude")
ax.yaxis.get_major_formatter().set_powerlimits((-2, 3))
ax = fig.add_subplot(gs[4])
ax.plot(rfreq.freq, rfreq.norm_phase, **pltkws)
ax.set_xscale("log")
ax.set_xlim(*ax.get_xlim()[::-1])
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Normalized phase [deg]")
ax.yaxis.get_major_formatter().set_powerlimits((-2, 3))
ax = fig.add_subplot(gs[2])
ax.plot(rindn.ind_num, rindn.ampl, **pltkws)
ax.set_xscale("log")
ax.set_xlim(*ax.get_xlim()[::-1])
ax.set_ylim(*amplim)
ax.set_xlabel("Induction number")
ax.set_ylabel("Amplitude")
ax.yaxis.get_major_formatter().set_powerlimits((-2, 3))
ax = fig.add_subplot(gs[5])
ax.plot(rindn.ind_num, rindn.phase, **pltkws)
ax.set_xscale("log")
ax.set_xlim(*ax.get_xlim()[::-1])
ax.set_ylim(*phaselim)
ax.set_xlabel("Induction number")
ax.set_ylabel("Phase [deg]")
ax.yaxis.get_major_formatter().set_powerlimits((-2, 3))
def fix_phases(arr):
for i in range(len(arr)):
while arr[i] > 180:
arr[i] = arr[i] - 180
while arr[i] < -180:
arr[i] = arr[i] + 180
return arr
|
mit
|
kylerbrown/scikit-learn
|
examples/ensemble/plot_forest_importances_faces.py
|
403
|
1519
|
"""
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important it is.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized across multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
|
bsd-3-clause
|
devanshdalal/scikit-learn
|
sklearn/tests/test_dummy.py
|
186
|
17778
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
|
bsd-3-clause
|
rueberger/MJHMC
|
mjhmc/search/validation.py
|
1
|
2780
|
"""
Script to plot the autocorrelation and fit for previously found optimal parameters
"""
from mjhmc.search.objective import obj_func_helper
from mjhmc.figures.ac_fig import load_params
from mjhmc.samplers.markov_jump_hmc import ControlHMC, MarkovJumpHMC
from mjhmc.misc.distributions import RoughWell, Gaussian, MultimodalGaussian
from mjhmc.misc.plotting import plot_fit
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
def plot_all_best(custom_params=None):
""" Creates a plot with the autocorrelation and fit for each distribution and sampler
:param custom_params: dictionary of custom params will be used on all distributions
and samplers. if None uses the current best params for each
:returns: None
:rtype: None
"""
distributions = [
RoughWell(nbatch=200)
# Gaussian(ndims=10, nbatch=200),
# MultimodalGaussian(ndims=5, separation=1)
]
samplers = [
ControlHMC,
MarkovJumpHMC
]
with PdfPages("validation.pdf") as pdf:
for distribution in distributions:
# [control, mjhmc, lahmc]
if custom_params is None:
params = load_params(distribution)
else:
params = [custom_params] * 3
active_params = params[:-1]
for sampler, hparams in zip(samplers, active_params):
print "Now running for {} on {}".format(sampler, distribution)
cos_coef, n_grad_evals, exp_coef, autocor, _ = obj_func_helper(
sampler, distribution.reset(), False, hparams)
fig = plot_fit(n_grad_evals,
autocor,
exp_coef,
cos_coef,
'validation',
hparams,
save=False
)
pdf.savefig(fig)
def plot_comparison(samplers, params, distribution):
""" Plot a comparison between samplers and params
:param samplers: list of samplers to test
:param params: respective list of parameters for each sampler
:param distribution: distribution to compare on
:returns: None
:rtype: None
"""
for sampler, hparams in zip(samplers, params):
_, n_grad_evals, _, autocor, _ = obj_func_helper(
sampler, distribution.reset(), False, hparams)
plt.plot(n_grad_evals, autocor,
label="B: {}, eps: {}, M: {}".format(hparams['beta'],
hparams['epsilon'],
hparams['num_leapfrog_steps']))
plt.legend()
plt.savefig('comparison.pdf')
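if __name__ == '__main__':
    # Editor's sketch (not part of the original script): generate validation.pdf
    # using the stored best parameters for each sampler and distribution.
    plot_all_best()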
|
gpl-2.0
|
deepesch/scikit-learn
|
examples/linear_model/plot_ols_3d.py
|
350
|
2040
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
|
bsd-3-clause
|
edhuckle/statsmodels
|
examples/python/tsa_arma_0.py
|
22
|
4424
|
## Autoregressive Moving Average (ARMA): Sunspots data
from __future__ import print_function
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.graphics.api import qqplot
### Sunpots Data
print(sm.datasets.sunspots.NOTE)
dta = sm.datasets.sunspots.load_pandas().data
dta.index = pd.Index(sm.tsa.datetools.dates_from_range('1700', '2008'))
del dta["YEAR"]
dta.plot(figsize=(12,8));
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(dta.values.squeeze(), lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(dta, lags=40, ax=ax2)
arma_mod20 = sm.tsa.ARMA(dta, (2,0)).fit()
print(arma_mod20.params)
arma_mod30 = sm.tsa.ARMA(dta, (3,0)).fit()
print(arma_mod20.aic, arma_mod20.bic, arma_mod20.hqic)
print(arma_mod30.params)
print(arma_mod30.aic, arma_mod30.bic, arma_mod30.hqic)
# * Does our model obey the theory?
sm.stats.durbin_watson(arma_mod30.resid.values)
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax = arma_mod30.resid.plot(ax=ax);
resid = arma_mod30.resid
stats.normaltest(resid)
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
fig = qqplot(resid, line='q', ax=ax, fit=True)
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(resid.values.squeeze(), lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(resid, lags=40, ax=ax2)
r,q,p = sm.tsa.acf(resid.values.squeeze(), qstat=True)
data = np.c_[range(1,41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
print(table.set_index('lag'))
# * This indicates a lack of fit.
# * In-sample dynamic prediction. How good does our model do?
predict_sunspots = arma_mod30.predict('1990', '2012', dynamic=True)
print(predict_sunspots)
ax = dta.ix['1950':].plot(figsize=(12,8))
ax = predict_sunspots.plot(ax=ax, style='r--', label='Dynamic Prediction')
ax.legend()
ax.axis((-20.0, 38.0, -4.0, 200.0))
def mean_forecast_err(y, yhat):
return y.sub(yhat).mean()
mean_forecast_err(dta.SUNACTIVITY, predict_sunspots)
#### Exercise: Can you obtain a better fit for the Sunspots model? (Hint: sm.tsa.AR has a method select_order)
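# One possible approach to the exercise above (editor's sketch, not part of the
# original example): compare a few candidate ARMA orders on the sunspots data by AIC.
# The candidate orders below are illustrative only.
best_order, best_aic = None, np.inf
for order in [(2, 0), (3, 0), (3, 1), (4, 0)]:
    try:
        res = sm.tsa.ARMA(dta, order).fit(disp=0)
    except Exception:
        continue
    if res.aic < best_aic:
        best_order, best_aic = order, res.aic
print("lowest AIC order:", best_order, best_aic)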
#### Simulated ARMA(4,1): Model Identification is Difficult
from statsmodels.tsa.arima_process import arma_generate_sample, ArmaProcess
np.random.seed(1234)
# include zero-th lag
arparams = np.array([1, .75, -.65, -.55, .9])
maparams = np.array([1, .65])
# Let's make sure this model is estimable.
arma_t = ArmaProcess(arparams, maparams)
arma_t.isinvertible
arma_t.isstationary
# * What does this mean?
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(arma_t.generate_sample(nsample=50));
arparams = np.array([1, .35, -.15, .55, .1])
maparams = np.array([1, .65])
arma_t = ArmaProcess(arparams, maparams)
arma_t.isstationary
arma_rvs = arma_t.generate_sample(nsample=500, burnin=250, scale=2.5)
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(arma_rvs, lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(arma_rvs, lags=40, ax=ax2)
# * For mixed ARMA processes the Autocorrelation function is a mixture of exponentials and damped sine waves after (q-p) lags.
# * The partial autocorrelation function is a mixture of exponentials and dampened sine waves after (p-q) lags.
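# Illustrative check (not in the original notebook): ArmaProcess also exposes the
# theoretical ACF/PACF of the simulated process, which can be compared with the
# sample estimates plotted above.
print(arma_t.acf(10))
print(arma_t.pacf(10))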
arma11 = sm.tsa.ARMA(arma_rvs, (1,1)).fit()
resid = arma11.resid
r,q,p = sm.tsa.acf(resid, qstat=True)
data = np.c_[range(1,41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
print(table.set_index('lag'))
arma41 = sm.tsa.ARMA(arma_rvs, (4,1)).fit()
resid = arma41.resid
r,q,p = sm.tsa.acf(resid, qstat=True)
data = np.c_[range(1,41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
print(table.set_index('lag'))
#### Exercise: How good an in-sample prediction can you obtain for another series, say, CPI?
macrodta = sm.datasets.macrodata.load_pandas().data
macrodta.index = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3'))
cpi = macrodta["cpi"]
##### Hint:
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax = cpi.plot(ax=ax)
ax.legend()
# The large p-value of the ADF unit-root test means the null of a unit root cannot be rejected, i.e. the CPI level series is non-stationary.
print(sm.tsa.adfuller(cpi)[1])
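# A minimal sketch for the CPI exercise (not part of the original notebook):
# since a unit root cannot be rejected, one option is to difference the series
# before fitting a low-order ARMA model.
cpi_diff = cpi.diff().dropna()
arma_cpi = sm.tsa.ARMA(cpi_diff, (1, 1)).fit()
print(arma_cpi.params)
print(arma_cpi.aic, arma_cpi.bic, arma_cpi.hqic)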
|
bsd-3-clause
|
iismd17/scikit-learn
|
examples/neighbors/plot_digits_kde_sampling.py
|
251
|
2022
|
"""
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
|
bsd-3-clause
|
Vrekrer/magdynlab
|
experiments/PNA_FMR_2P.py
|
1
|
9450
|
# -*- coding: utf-8 -*-
import numpy
import time
import os
import magdynlab.instruments
import magdynlab.controllers
import magdynlab.data_types
import threading_decorators as ThD
import matplotlib.pyplot as plt
@ThD.gui_safe
def Plot_dPdH(Data):
f = plt.figure('VNA-FMR dP/dH', (5, 4))
if not(f.axes):
plt.subplot()
ax = f.axes[0]
if not(ax.lines):
ax.plot([],[],'b.-')
ax.set_xlim([Data['h'].min(), Data['h'].max()])
ax.set_ylim([-1E-10, 1E-10])
line = ax.lines[-1]
line.set_data(Data['h'], Data['dP/dH']*1000)
ax.set_xlabel('Field (Oe)')
ax.set_ylabel('dP/dH')
ax.grid(True)
#check Y scale
ymax = numpy.nan_to_num(Data['dP/dH']).max()*1000
ymin = numpy.nan_to_num(Data['dP/dH']).min()*1000
dy = ymax - ymin
yc = (ymax + ymin)/2
ymin, ymax = ax.get_ylim()
ymax = numpy.max([yc + dy*1.1/2, ymax])
ymin = numpy.min([yc - dy*1.1/2, ymin])
ax.set_ylim([ymin, ymax])
f.tight_layout()
f.canvas.draw()
@ThD.gui_safe
def Plot_ColorMap(Data):
f = plt.figure('PNA-FMR', (5, 4))
extent = numpy.array([Data['h'].min(),
Data['h'].max(),
Data['f'].min()/1E9,
Data['f'].max()/1E9])
if not(f.axes):
plt.subplot()
ax = f.axes[0]
ax.clear()
ax.imshow(Data['ColorMap'].T,
aspect='auto',
origin='lower',
extent=extent)
ax.set_xlabel('Field (Oe)')
ax.set_ylabel('Freq (GHz)')
f.tight_layout()
f.canvas.draw()
class VNA_FMR_2P(object):
def __init__(self, ResouceNames={}):
logFile = os.path.expanduser('~/MagDynLab.log')
defaultRN = dict(RN_Kepco = 'GPIB0::6::INSTR',
RN_PNA = 'TCPIP::192.168.13.10::INSTR')
defaultRN.update(ResouceNames)
RN_Kepco = defaultRN['RN_Kepco']
RN_PNA = defaultRN['RN_PNA']
PowerSource = magdynlab.instruments.KEPCO_BOP(ResourceName=RN_Kepco,
logFile=logFile)
VNA = magdynlab.instruments.KEYSIGHT_PNA(ResourceName=RN_PNA,
logFile=logFile)
self.VNAC = magdynlab.controllers.VNA_Controller(VNA)
self.FC = magdynlab.controllers.FieldController(PowerSource)
self.FC.Kepco.Voltage = 15
#Experimental/plot data
self.Data = magdynlab.data_types.DataContainer()
self.Data.file_id = '.VNA_2P_Raw' #S11 vs hs vs fs
self.ColorMapData = magdynlab.data_types.DataContainer()
self.ColorMapData.file_id = '.VNA_ColorMap' #PAbs vs hs vs fs
self.Data_Osc = magdynlab.data_types.DataContainer()
self.Data_Osc.file_id = '.VNA_Osc_2P_Raw' #S11 vs h vs hosc
self.Data_dPdH = magdynlab.data_types.DataContainer()
self.Data_dPdH.file_id = '.VNA_dPdH' #dP/dH vs h (fixed freq)
self.Info = ''
def SetTraces(self):
self.VNAC.set_traces_SParameters_2P()
def PlotColorMap(self, i=None):
Pabs_ref = 1 \
- numpy.abs(self.Data['S11_Ref'])**2 \
- numpy.abs(self.Data['S21_Ref'])**2
if i is not None:
# Update only i column
Pabs = 1 \
- numpy.abs(self.Data['S11'][i])**2 \
- numpy.abs(self.Data['S21'][i])**2
if self.Data['h'][0] > self.Data['h'][-1]:
i = -1 - i
self.ColorMapData['ColorMap'][i] = Pabs - Pabs_ref
else:
Pabs = 1 \
- numpy.abs(self.Data['S11'])**2 \
- numpy.abs(self.Data['S21'])**2
self.ColorMapData['ColorMap'] = Pabs - Pabs_ref[None,:]
if self.Data['h'][0] > self.Data['h'][-1]:
self.ColorMapData['ColorMap'] = Pabs[::-1] - Pabs_ref[None,:]
Plot_ColorMap(self.ColorMapData)
def MeasureRef(self):
self.Data['S11_Ref'] = self.VNAC.getSData(0, True)
self.Data['S21_Ref'] = self.VNAC.getSData(1, False)
self.Data['S22_Ref'] = self.VNAC.getSData(2, False)
self.Data['S12_Ref'] = self.VNAC.getSData(3, False)
@ThD.as_thread
def Measure(self, fields, file_name, hold_time=0.0):
self.Data['h'] = fields
self.Data['f'] = self.VNAC.frequencies
data_shape = (len(self.Data['h']), len(self.Data['f']))
self.Data['S11'] = numpy.zeros(data_shape, dtype=complex)
self.Data['S21'] = numpy.zeros(data_shape, dtype=complex)
self.Data['S22'] = numpy.zeros(data_shape, dtype=complex)
self.Data['S12'] = numpy.zeros(data_shape, dtype=complex)
self.Data.info = self.Info
self.ColorMapData['h'] = self.Data['h']
self.ColorMapData['f'] = self.Data['f']
self.ColorMapData['ColorMap'] = numpy.zeros(data_shape, dtype=float)
self.ColorMapData['ColorMap'] += numpy.nan
self.ColorMapData.info = self.Info
# Loop for each field
for i, h in enumerate(fields):
self.FC.setField(h)
time.sleep(hold_time)
self.Data['S11'][i] = self.VNAC.getSData(0, True)
self.Data['S21'][i] = self.VNAC.getSData(1, False)
self.Data['S22'][i] = self.VNAC.getSData(2, False)
self.Data['S12'][i] = self.VNAC.getSData(3, False)
self.PlotColorMap(i)
ThD.check_stop()
if file_name is not None:
self.Data.save(file_name)
self.FC.TurnOff()
self.FC.Kepco.BEEP()
def PlotdPdH(self, i=None):
ss = self.Data_Osc['AC Field'] / self.Data_Osc['oscH']**2
Pabs = 1 \
- numpy.abs(self.Data_Osc['S11'])**2 \
- numpy.abs(self.Data_Osc['S21'])**2
A_Pabs = (Pabs * ss[None,:]).mean(axis=1)
if i is not None:
self.Data_dPdH['dP/dH'][i] = A_Pabs[i]
else:
self.Data_dPdH['dP/dH'] = A_Pabs
Plot_dPdH(self.Data_dPdH)
@ThD.as_thread
def Measure_dPdH(self, fields, freq, file_name,
oscH=5, osc_points_per_cicle=4, osc_repetitions=10,
hold_time=0.0, osc_hold_time=0.01, mode='Fast'):
self.VNAC.backup_sweep()
self.VNAC.VNA.Ch1.SetSweep(start=freq, stop=freq, np=1)
self.Data_Osc['h'] = fields
self.Data_Osc['f'] = freq
self.Data_Osc['osc_points_per_cicle'] = osc_points_per_cicle
self.Data_Osc['osc_repetitions'] = osc_repetitions
self.Data_Osc['oscH'] = oscH
oscR = osc_repetitions
oscN = osc_repetitions * osc_points_per_cicle
ss = numpy.sin(numpy.linspace(0, 2*oscR*numpy.pi, oscN))
self.Data_Osc['AC Field'] = ss * oscH
data_shape = (len(self.Data_Osc['h']), oscN)
self.Data_Osc['S11'] = numpy.zeros(data_shape, dtype=complex)
self.Data_Osc['S21'] = numpy.zeros(data_shape, dtype=complex)
if mode == 'Full':
self.Data_Osc['S22'] = numpy.zeros(data_shape, dtype=complex)
self.Data_Osc['S12'] = numpy.zeros(data_shape, dtype=complex)
self.Data_Osc.info = self.Info
self.Data_dPdH['h'] = fields
self.Data_dPdH['f'] = freq
self.Data_dPdH['dP/dH'] = numpy.zeros_like(fields) + numpy.nan
extra_info = ['',
'Frequency : %(f)0.6f GHz' % {'f':freq/1E9},
'Osc Field : %(oscH)0.1f Oe' % {'oscH':oscH},
'OscPoints : %(oscP)d' % {'oscP':osc_points_per_cicle},
'OscReps :%(oscR)d' % {'oscR':osc_repetitions},
'']
self.Data_dPdH.info = self.Info + '\n'.join(extra_info)
# Loop for each DC field
for hi, h in enumerate(fields):
self.FC.setField(h)
time.sleep(hold_time)
i0 = self.FC.Kepco.current
cs = i0 + self.Data_Osc['AC Field']/self.FC.HperOut
# Loop for each AC field
for ci, c in enumerate(cs):
self.FC.Kepco.current = c
time.sleep(osc_hold_time)
self.Data_Osc['S11'][hi,ci] = self.VNAC.getSData(0, True)
self.Data_Osc['S21'][hi,ci] = self.VNAC.getSData(1, False)
if mode == 'Full':
self.Data_Osc['S22'][hi,ci] = self.VNAC.getSData(2, False)
self.Data_Osc['S12'][hi,ci] = self.VNAC.getSData(3, False)
ThD.check_stop()
ThD.check_stop()
self.PlotdPdH(hi)
if file_name is not None:
self.Data_Osc.save(file_name)
self.Data_dPdH.savetxt(file_name + '.dPxH', keys=['h', 'dP/dH'])
self.VNAC.restore_sweep()
self.FC.TurnOff()
self.FC.Kepco.BEEP()
def Stop(self, TurnOff=True):
        print('Stopping...')
self.FC.BEEP()
self.Measure.stop()
if self.Measure.thread is not None:
self.Measure.thread.join()
time.sleep(1)
self.FC.BEEP()
time.sleep(0.1)
self.FC.BEEP()
print('DONE')
if TurnOff:
print('Turning field OFF')
self.FC.TurnOff()
print('DONE')
def field_span(center, span, n_pts, hmin=0, hmax=20000):
crv = numpy.linspace(center-span/2, center+span/2, n_pts)
mask = (crv >= hmin) * (crv <= hmax)
return crv[mask]
|
mit
|
sourabhdattawad/BuildingMachineLearningSystemsWithPython
|
ch03/rel_post_20news.py
|
24
|
3903
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import sklearn.datasets
import scipy as sp
new_post = \
"""Disk drive problems. Hi, I have a problem with my hard disk.
After 1 year it is working only sporadically now.
I tried to format it, but now it doesn't boot any more.
Any ideas? Thanks.
"""
print("""\
Dear reader of the 1st edition of 'Building Machine Learning Systems with Python'!
For the 2nd edition we introduced a couple of changes that will result into
results that differ from the results in the 1st edition.
E.g. we now fully rely on scikit's fetch_20newsgroups() instead of requiring
you to download the data manually from MLCOMP.
If you have any questions, please ask at http://www.twotoreal.com
""")
all_data = sklearn.datasets.fetch_20newsgroups(subset="all")
print("Number of total posts: %i" % len(all_data.filenames))
# Number of total posts: 18846
groups = [
'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware',
'comp.sys.mac.hardware', 'comp.windows.x', 'sci.space']
train_data = sklearn.datasets.fetch_20newsgroups(subset="train",
categories=groups)
print("Number of training posts in tech groups:", len(train_data.filenames))
# Number of training posts in tech groups: 3529
labels = train_data.target
num_clusters = 50 # sp.unique(labels).shape[0]
import nltk.stem
english_stemmer = nltk.stem.SnowballStemmer('english')
from sklearn.feature_extraction.text import TfidfVectorizer
class StemmedTfidfVectorizer(TfidfVectorizer):
def build_analyzer(self):
analyzer = super(TfidfVectorizer, self).build_analyzer()
return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))
vectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5,
stop_words='english', decode_error='ignore'
)
vectorized = vectorizer.fit_transform(train_data.data)
num_samples, num_features = vectorized.shape
print("#samples: %d, #features: %d" % (num_samples, num_features))
# samples: 3529, #features: 4712
from sklearn.cluster import KMeans
km = KMeans(n_clusters=num_clusters, n_init=1, verbose=1, random_state=3)
clustered = km.fit(vectorized)
print("km.labels_=%s" % km.labels_)
# km.labels_=[ 6 34 22 ..., 2 21 26]
print("km.labels_.shape=%s" % km.labels_.shape)
# km.labels_.shape=3529
from sklearn import metrics
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
# Homogeneity: 0.400
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
# Completeness: 0.206
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
# V-measure: 0.272
print("Adjusted Rand Index: %0.3f" %
metrics.adjusted_rand_score(labels, km.labels_))
# Adjusted Rand Index: 0.064
print("Adjusted Mutual Information: %0.3f" %
metrics.adjusted_mutual_info_score(labels, km.labels_))
# Adjusted Mutual Information: 0.197
print(("Silhouette Coefficient: %0.3f" %
metrics.silhouette_score(vectorized, labels, sample_size=1000)))
# Silhouette Coefficient: 0.006
new_post_vec = vectorizer.transform([new_post])
new_post_label = km.predict(new_post_vec)[0]
similar_indices = (km.labels_ == new_post_label).nonzero()[0]
similar = []
for i in similar_indices:
dist = sp.linalg.norm((new_post_vec - vectorized[i]).toarray())
similar.append((dist, train_data.data[i]))
similar = sorted(similar)
print("Count similar: %i" % len(similar))
show_at_1 = similar[0]
show_at_2 = similar[int(len(similar) / 10)]
show_at_3 = similar[int(len(similar) / 2)]
print("=== #1 ===")
print(show_at_1)
print()
print("=== #2 ===")
print(show_at_2)
print()
print("=== #3 ===")
print(show_at_3)
|
mit
|
Aratz/pyurdme
|
examples/yeast_polarization/G_protein_cycle.py
|
5
|
4520
|
#!/usr/bin/env python
""" pyURDME model file for the polarization 1D example. """
import os
import sys
import pyurdme
import dolfin
import math
import matplotlib.pyplot as plt
import numpy
# Sub domain for Periodic boundary condition
class PeriodicBoundary1D(dolfin.SubDomain):
def __init__(self, a=0.0, b=1.0):
""" 1D domain from a to b. """
dolfin.SubDomain.__init__(self)
self.a = a
self.b = b
def inside(self, x, on_boundary):
return not bool((dolfin.near(x[0], self.b)) and on_boundary)
def map(self, x, y):
if dolfin.near(x[0], self.b):
y[0] = self.a + (x[0] - self.b)
class PheromoneGradient(pyurdme.URDMEDataFunction):
def __init__(self, a=0.0, b=1.0, L_min=0, L_max=4, MOLAR=1.0):
""" 1D domain from a to b. """
pyurdme.URDMEDataFunction.__init__(self, name="PheromoneGradient")
self.a = a
self.b = b
self.L_min = L_min
self.L_max = L_max
self.MOLAR = MOLAR
def map(self, x):
ret = ((self.L_max - self.L_min) * 0.5 * (1 + math.cos(0.5*x[0])) + self.L_min) * self.MOLAR
return ret
class G_protein_cycle_1D(pyurdme.URDMEModel):
def __init__(self,model_name="G_protein_cycle_1D"):
pyurdme.URDMEModel.__init__(self,model_name)
# Species
# R RL G Ga Gbg Gd
R = pyurdme.Species(name="R", diffusion_constant=0.01)
RL = pyurdme.Species(name="RL", diffusion_constant=0.01)
G = pyurdme.Species(name="G", diffusion_constant=0.01)
Ga = pyurdme.Species(name="Ga", diffusion_constant=0.01)
Gbg = pyurdme.Species(name="Gbg",diffusion_constant=0.01)
Gd = pyurdme.Species(name="Gd", diffusion_constant=0.01)
self.add_species([R,RL,G,Ga,Gbg,Gd])
L = 4*3.14159
NUM_VOXEL = 200
MOLAR=6.02e-01*((L/NUM_VOXEL)**3)
self.mesh = pyurdme.URDMEMesh.generate_interval_mesh(nx=NUM_VOXEL, a=-2*3.14159, b=2*3.14159, periodic=True)
SA = pyurdme.Parameter(name="SA" ,expression=201.056)
V = pyurdme.Parameter(name="V" ,expression=33.5)
k_RL = pyurdme.Parameter(name="k_RL" ,expression=2e-03/MOLAR)
k_RLm = pyurdme.Parameter(name="k_RLm" ,expression=1e-02)
k_Rs = pyurdme.Parameter(name="k_Rs" ,expression="4.0/SA")
k_Rd0 = pyurdme.Parameter(name="k_Rd0" ,expression=4e-04)
k_Rd1 = pyurdme.Parameter(name="k_Rd1" ,expression=4e-04)
k_G1 = pyurdme.Parameter(name="k_G1" ,expression="1.0*SA")
k_Ga = pyurdme.Parameter(name="k_Ga" ,expression="1e-06*SA")
k_Gd = pyurdme.Parameter(name="k_Gd" ,expression=0.1)
self.add_parameter([SA,V,k_RL,k_RLm,k_Rs,k_Rd0,k_Rd1,k_G1,k_Ga,k_Gd])
# Add Data Function to model the mating pheromone gradient.
self.add_data_function(PheromoneGradient(a=-2*3.14159, b=2*3.14159, MOLAR=MOLAR))
# Reactions
R0 = pyurdme.Reaction(name="R0", reactants={}, products={R:1}, massaction=True, rate=k_Rs)
R1 = pyurdme.Reaction(name="R1", reactants={R:1}, products={}, massaction=True, rate=k_Rd0)
R2 = pyurdme.Reaction(name="R2", reactants={R:1}, products={RL:1}, propensity_function="k_RL*R*PheromoneGradient/vol")
R3 = pyurdme.Reaction(name="R3", reactants={RL:1}, products={R:1}, massaction=True, rate=k_RLm)
R4 = pyurdme.Reaction(name="R4", reactants={RL:1}, products={}, massaction=True, rate=k_RLm)
R5 = pyurdme.Reaction(name="R5", reactants={G:1}, products={Ga:1, Gbg:1}, propensity_function="k_Ga*RL*G/vol")
R6 = pyurdme.Reaction(name="R6", reactants={Ga:1}, products={Gd:1}, massaction=True, rate=k_Ga)
R7 = pyurdme.Reaction(name="R7", reactants={Gd:1, Gbg:1}, products={G:1}, massaction=True, rate=k_G1)
self.add_reaction([R0,R1,R2,R3,R4,R5,R6,R7])
# Distribute molecules randomly over the mesh according to their initial values
self.set_initial_condition_scatter({R:10000})
self.set_initial_condition_scatter({G:10000})
self.timespan(range(201))
if __name__=="__main__":
""" Dump model to a file. """
model = G_protein_cycle_1D()
result = model.run()
x_vals = model.mesh.coordinates()[:, 0]
G = result.get_species("G", timepoints=49)
Gbg = result.get_species("Gbg", timepoints=49)
plt.plot(x_vals, Gbg)
plt.title('Gbg at t=49')
plt.xlabel('Space')
plt.ylabel('Number of Molecules')
plt.show()
|
gpl-3.0
|
mingdachen/cuda-convnet2
|
convdata.py
|
174
|
14675
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
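        # Net effect of the noise addition and rescaling below:
        # data = (data + coeff * noise) / (1 + coeff),
        # which keeps the overall magnitude comparable to the un-noised (test) data.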
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
        mean = self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
for i in xrange(self.num_views):
target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
|
apache-2.0
|
pompiduskus/scikit-learn
|
sklearn/datasets/samples_generator.py
|
45
|
56433
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import warnings
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
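    Examples
    --------
    A minimal illustration with the default settings (only shapes are shown,
    since the values depend on the random state):
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(random_state=0)
    >>> X.shape
    (100, 20)
    >>> y.shape
    (100,)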
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=False,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : bool, optional (default=False),
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
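    Examples
    --------
    A small illustrative call (indicator output, dense features); only shapes
    are shown since the values depend on the random state:
    >>> from sklearn.datasets import make_multilabel_classification
    >>> X, Y = make_multilabel_classification(n_samples=10, n_features=20,
    ...                                       n_classes=5, return_indicator=True,
    ...                                       random_state=0)
    >>> X.shape
    (10, 20)
    >>> Y.shape
    (10, 5)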
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
lb = MultiLabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
else:
warnings.warn('Support for the sequence of sequences multilabel '
'representation is being deprecated and replaced with '
'a sparse indicator matrix. '
'return_indicator will default to True from version '
'0.17.',
DeprecationWarning)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
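    Examples
    --------
    A minimal illustration (only shapes are shown):
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_samples=50, n_features=4, n_informative=2,
    ...                        random_state=0)
    >>> X.shape
    (50, 4)
    >>> y.shape
    (50,)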
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
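    Examples
    --------
    A minimal illustration (only shapes are shown):
    >>> from sklearn.datasets import make_moons
    >>> X, y = make_moons(n_samples=100, noise=0.1, random_state=0)
    >>> X.shape
    (100, 2)
    >>> y.shape
    (100,)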
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
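    Examples
    --------
    A minimal usage sketch (shape only; the entries depend on the random state):
    >>> from sklearn.datasets.samples_generator import make_low_rank_matrix
    >>> X = make_low_rank_matrix(n_samples=50, n_features=25,
    ...                          effective_rank=5, random_state=0)
    >>> X.shape
    (50, 25)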
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such as D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
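    Examples
    --------
    A minimal usage sketch illustrating the documented shapes and per-column
    sparsity (values are pseudo-random):
    >>> from sklearn.datasets.samples_generator import make_sparse_coded_signal
    >>> Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
    ...                                    n_features=10, n_nonzero_coefs=3,
    ...                                    random_state=0)
    >>> Y.shape, D.shape, X.shape
    ((10, 5), (10, 8), (8, 5))
    >>> int((X[:, 0] != 0).sum())
    3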
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
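    Examples
    --------
    A minimal usage sketch (shapes only):
    >>> from sklearn.datasets.samples_generator import make_sparse_uncorrelated
    >>> X, y = make_sparse_uncorrelated(n_samples=100, n_features=10,
    ...                                 random_state=0)
    >>> X.shape
    (100, 10)
    >>> y.shape
    (100,)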
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
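    Examples
    --------
    A minimal usage sketch; the symmetry check follows from the construction
    (prec is built as chol.T * chol), while the exact entries depend on the
    random state:
    >>> import numpy as np
    >>> from sklearn.datasets.samples_generator import make_sparse_spd_matrix
    >>> prec = make_sparse_spd_matrix(dim=4, alpha=0.9, random_state=0)
    >>> prec.shape
    (4, 4)
    >>> bool(np.allclose(prec, prec.T))
    True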
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
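    Examples
    --------
    A minimal usage sketch (shapes only):
    >>> from sklearn.datasets.samples_generator import make_swiss_roll
    >>> X, t = make_swiss_roll(n_samples=100, random_state=0)
    >>> X.shape
    (100, 3)
    >>> t.shape
    (100,)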
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
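    Examples
    --------
    A minimal usage sketch; with 9 samples and 3 classes the quantile labels
    split evenly (coordinates are pseudo-random):
    >>> import numpy as np
    >>> from sklearn.datasets.samples_generator import make_gaussian_quantiles
    >>> X, y = make_gaussian_quantiles(n_samples=9, n_features=2, n_classes=3,
    ...                                random_state=0)
    >>> X.shape
    (9, 2)
    >>> np.bincount(y).tolist()
    [3, 3, 3]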
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
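    Examples
    --------
    A minimal usage sketch (shapes only; the row/column indicators have one
    row per bicluster):
    >>> from sklearn.datasets.samples_generator import make_biclusters
    >>> X, rows, cols = make_biclusters(shape=(10, 8), n_clusters=3,
    ...                                 random_state=0)
    >>> X.shape
    (10, 8)
    >>> rows.shape, cols.shape
    ((3, 10), (3, 8))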
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
|
bsd-3-clause
|
bousmalis/models
|
autoencoder/AutoencoderRunner.py
|
12
|
1660
|
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder_models.Autoencoder import Autoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def standard_scale(X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = Autoencoder(n_input = 784,
n_hidden = 200,
transfer_function = tf.nn.softplus,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
# Fit training using batch data
cost = autoencoder.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
|
apache-2.0
|
miha-skalic/ITEKA
|
qt_design/__init__.py
|
1
|
14477
|
"""
Main window and functions for ITEKA
"""
# windows
from qt_design.main_ui import *
from qt_design.widget_windows import *
import calculations
import qt_design.reaction_plots as reaction_plots
from qt_design.calc_functions import *
import pickle
import sys
import os
import PyQt4.QtCore as qc
QtCore.QLocale.setDefault(QtCore.QLocale('en_US'))
class DefaultWindow(Ui_MainWindow, DsMethods, SsMethods):
def preload_2subs_data(self, pname):
"""
Sets up naming and reaction rates
"""
dlg = PreloadTwoSubs()
if not (dlg.SetBName.text() and dlg.SetAName.text()):
WarningMessage(message="You need to name your substrates.")
return -1
self.reaction_data = calculations.TwoSubstrates(pname, dlg.SetAName.text(),
dlg.SetBName.text(),
is_itc=dlg.IsITC.isChecked(),
arate=dlg.ASpBox.value(),
brate=dlg.BSpBox.value())
def batch_run(self):
"""
Creates fitting and saves the results
"""
dlg = BatchRun(self.reaction_data)
dlg.exec_()
def load_data(self):
"""
Choose to load for single or multiple substrates
"""
if self.reaction_data.is_single():
self.load_data_ss()
else:
self.load_data_ds()
self.post_load()
def post_load(self):
"""
        Post data loading changes
"""
for button in self.GraphBG2.buttons():
button.show()
for button in self.GraphBG1.buttons():
button.show()
self.Leg1Box.show()
self.label.show()
self.label_2.show()
self.Extrap1.show()
self.Extrap2.show()
if self.reaction_data.is_single():
self.Leg2Box.show()
MainWindow.setWindowTitle("{} - ITEKA".format(self.reaction_data.name))
self.changegraph_layout()
self.BatchButton.setEnabled(True)
self.SolExpButton.setEnabled(True)
self.actionSave_input_data.setEnabled(True)
self.actionExport_data_xls.setEnabled(True)
self.NewProButton.setEnabled(False)
self.LoadButton.setEnabled(True)
self.RewBut.setEnabled(True)
def start_project(self):
"""
Project setup
"""
dlg = StartProject()
if dlg.exec_() == QtGui.QDialog.Accepted:
# check for name
if not dlg.ProjectName.text().strip():
WarningMessage(message='You need a title for your project.')
return
# check for 2 substrates
if dlg.radioButton_2.isChecked():
if self.preload_2subs_data(dlg.ProjectName.text().strip()) == -1:
return
else:
self.reaction_data = calculations.OneSubstrate(dlg.ProjectName.text().strip(),
cunit=dlg.ConcVal.text(),
tunit=dlg.TimeVal.text())
# enable new options
self.LoadButton.setEnabled(True)
self.NewProButton.setEnabled(False)
# setup data object
self.project_name = dlg.ProjectName.text().strip()
MainWindow.setWindowTitle("{} - ITEKA".format(self.project_name))
def reset_project(self):
"""
Clears current work in project
"""
if self.reaction_data is None:
return
message = 'Your unsaved data will be lost. Continue?'
msgd = QtGui.QMessageBox
if msgd.question(None, 'Warning!', message, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) \
== QtGui.QMessageBox.No:
return -1
MainWindow.setWindowTitle("ITEKA")
self.fitparams = None
self.reaction_data = None
self.project_name = None
self.NewProButton.setEnabled(True)
self.LoadButton.setEnabled(False)
self.BatchButton.setEnabled(False)
self.SolExpButton.setEnabled(False)
self.actionSave_input_data.setEnabled(False)
self.actionExport_data_xls.setEnabled(False)
self.RewBut.setEnabled(False)
# flush graphs
for button in self.GraphBG1.buttons():
button.hide()
for button in self.GraphBG2.buttons():
button.hide()
self.Leg1Box.hide()
self.Leg2Box.hide()
self.label.hide()
self.label_2.hide()
self.Extrap1.hide()
self.Extrap2.hide()
self.Graph1Sel.setCurrentIndex(0)
self.Graph2Sel.setCurrentIndex(1)
self.fig.clear()
self.canvas.hide()
self.mpl_toolbar.hide()
self.Graph1Sel.hide()
self.Graph2Sel.hide()
def changegraph_layout(self):
"""
Change plot buttons display and replot graphs
"""
# Display
if (not self.reaction_data.is_single()) and (self.Graph1Sel.currentIndex() in [1, 2]):
for button in self.GraphBG1.buttons():
button.setEnabled(True)
for button in self.GraphBG2.buttons():
button.setEnabled(True)
elif not self.reaction_data.is_single():
for button in self.GraphBG1.buttons():
button.setEnabled(False)
for button in self.GraphBG2.buttons():
button.setEnabled(False)
else:
for button in self.GraphBG1.buttons():
button.setEnabled(self.Graph1Sel.currentIndex() in [1, 2])
for button in self.GraphBG2.buttons():
button.setEnabled(self.Graph2Sel.currentIndex() in [1, 2])
# unchecking
if self.RepParab1.isChecked():
self.RepLin1.setChecked(False)
if self.RepParab2.isChecked():
self.RepLin2.setChecked(False)
if self.GlobParab1.isChecked():
self.GlobLin1.setChecked(False)
if self.GlobParab2.isChecked():
self.GlobLin2.setChecked(False)
# SS and DS difference
if self.reaction_data.is_single():
self.plot_basicgraphs()
else:
self.plot_basicgraphs2()
def plot_basicgraphs(self):
"""
Plots the graphs to be displayed in main window.
"""
self.fig.clear()
self.axes = self.fig.add_subplot(121)
self.axes2 = self.fig.add_subplot(122)
repfit1 = 1 if self.RepLin1.isChecked() else 2 if self.RepParab1.isChecked() else 0
repfit2 = 1 if self.RepLin2.isChecked() else 2 if self.RepParab2.isChecked() else 0
globfit1 = 1 if self.GlobLin1.isChecked() else 2 if self.GlobParab1.isChecked() else 0
globfit2 = 1 if self.GlobLin2.isChecked() else 2 if self.GlobParab2.isChecked() else 0
reaction_plots.plot_singlegraph(self.reaction_data, self.axes, self.Graph1Sel.currentIndex(),
legend='Replicate ' if self.Leg1Box.isChecked() else '',
rep_fit=repfit1,
global_fit=globfit1,
extrapolation = self.Extrap1.value())
reaction_plots.plot_singlegraph(self.reaction_data, self.axes2, self.Graph2Sel.currentIndex(),
legend='Replicate ' if self.Leg2Box.isChecked() else '',
rep_fit=repfit2,
global_fit=globfit2,
extrapolation = self.Extrap2.value())
# New options
self.Graph1Sel.show()
self.Graph2Sel.show()
# Draw the plots
self.canvas.draw()
self.canvas.show()
self.mpl_toolbar.show()
def plot_basicgraphs2(self):
"""
Plotting for two substrates examples!
"""
self.Graph1Sel.show()
self.fig.clear()
repfit1 = 1 if self.RepLin1.isChecked() else 2 if self.RepParab1.isChecked() else 0
repfit2 = 1 if self.RepLin2.isChecked() else 2 if self.RepParab2.isChecked() else 0
globfit1 = 1 if self.GlobLin1.isChecked() else 2 if self.GlobParab1.isChecked() else 0
globfit2 = 1 if self.GlobLin2.isChecked() else 2 if self.GlobParab2.isChecked() else 0
self.axes = self.fig.add_subplot(121)
self.axes2 = self.fig.add_subplot(122)
reaction_plots.plot_singlegraph(self.reaction_data.get_repres(True), self.axes, self.Graph1Sel.currentIndex(),
legend='Set ' if self.Leg1Box.isChecked() else '', rep_fit=repfit1,
global_fit=globfit1, sname=self.reaction_data.nameA,
extrapolation = self.Extrap1.value())
reaction_plots.plot_singlegraph(self.reaction_data.get_repres(False), self.axes2, self.Graph1Sel.currentIndex(),
legend='Set ' if self.Leg1Box.isChecked() else '', rep_fit=repfit2,
global_fit=globfit2, sname=self.reaction_data.nameB,
extrapolation = self.Extrap2.value())
# Draw the plots
self.canvas.draw()
self.canvas.show()
self.mpl_toolbar.show()
def load_from_file(self):
"""
Loads pickle file
"""
if self.reset_project() == -1:
return
dialog = QtGui.QFileDialog()
filename = QtGui.QFileDialog.getOpenFileName(dialog, 'Open File', os.getenv('HOME'), 'Pickle (*.pkl)')
if filename:
self.reset_project()
self.reaction_data = pickle.load(open(filename, 'rb'))
self.post_load()
def save_to_file(self):
"""
Saves data pickle
"""
dialog = QtGui.QFileDialog()
filename = QtGui.QFileDialog.getSaveFileName(dialog, 'Save File', os.getenv('HOME'), 'Pickle (*.pkl)')
if filename:
pickle.dump(self.reaction_data, open(filename, 'wb'))
def save_to_excel(self):
"""
Saves data to excel spreadsheet
"""
if self.reaction_data.is_single():
if self.reaction_data.get_replicates() == 0:
                WarningMessage(message="You need to input some data first.")
dialog = QtGui.QFileDialog()
filename = QtGui.QFileDialog.getSaveFileName(dialog, 'Save File', os.getenv('HOME'), 'Excel Workbook (*.xlsx)')
if filename:
calculations.data_to_xls(self.reaction_data, filename)
def change_selection(self):
"""
Switch for button combobox
"""
if self.RepLin1.isChecked():
self.RepParab1.setChecked(False)
if self.RepLin2.isChecked():
self.RepParab2.setChecked(False)
if self.GlobLin1.isChecked():
self.GlobParab1.setChecked(False)
if self.GlobLin2.isChecked():
self.GlobParab2.setChecked(False)
def viewdata(self):
"""
        Window for viewing and deleting data points
"""
dlg = PointsView(self.reaction_data)
if dlg.exec_():
self.reaction_data = dlg.reac_data
self.changegraph_layout()
def __init__(self):
Ui_MainWindow.__init__(self)
self.setupUi(MainWindow)
        # data object initialization
self.reaction_data = None
self.fitparams = None
self.axes = None
self.axes2 = None
self.fitresults = None
self.project_name = None
self.savefolder = None
# matplotlib canvas
self.fig = Figure()
self.canvas = FigureCanvas(self.fig)
self.mpl_toolbar = NavigationToolbar(self.canvas, self.centralwidget)
self.verticalLayout_2.addWidget(self.canvas)
self.verticalLayout_2.addWidget(self.mpl_toolbar)
# Hide widgets
for button in self.GraphBG1.buttons():
button.hide()
for button in self.GraphBG2.buttons():
button.hide()
self.Leg1Box.hide()
self.Leg2Box.hide()
self.Graph1Sel.hide()
self.Graph2Sel.hide()
self.mpl_toolbar.hide()
self.label.hide()
self.label_2.hide()
self.Extrap1.hide()
self.Extrap2.hide()
self.canvas.hide()
self.StatusLab.hide()
# gray out the buttons
self.LoadButton.setEnabled(False)
self.SolExpButton.setEnabled(False)
self.BatchButton.setEnabled(False)
self.actionSave_input_data.setEnabled(False)
self.actionExport_data_xls.setEnabled(False)
self.RewBut.setEnabled(False)
# connect buttons to process
self.NewProButton.clicked.connect(self.start_project)
self.LoadButton.clicked.connect(self.load_data)
self.BatchButton.clicked.connect(self.batch_run)
self.SolExpButton.clicked.connect(self.exploresol)
self.RewBut.clicked.connect(self.viewdata)
# connect combobox to changes
self.Graph1Sel.activated.connect(self.changegraph_layout)
self.Graph2Sel.setCurrentIndex(1)
self.Graph2Sel.activated.connect(self.changegraph_layout)
self.GraphBG1.buttonClicked.connect(self.changegraph_layout)
self.GraphBG2.buttonClicked.connect(self.changegraph_layout)
for button in [self.RepLin1, self.RepLin2, self.GlobLin1, self.GlobLin2]:
button.stateChanged.connect(self.change_selection)
self.Leg1Box.stateChanged.connect(self.changegraph_layout)
self.Leg2Box.stateChanged.connect(self.changegraph_layout)
self.Extrap1.valueChanged.connect(self.changegraph_layout)
self.Extrap2.valueChanged.connect(self.changegraph_layout)
# Menu triggers
self.actionNew_project.triggered.connect(self.reset_project)
self.actionLoad_input_data.triggered.connect(self.load_from_file)
self.actionSave_input_data.triggered.connect(self.save_to_file)
self.actionExport_data_xls.triggered.connect(self.save_to_excel)
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = DefaultWindow()
MainWindow.show()
sys.exit(app.exec_())
|
gpl-3.0
|
ChenguangZhang/Python_CFD_Course
|
lesson_08.py
|
1
|
1434
|
#!/usr/bin/python
import time, sys
import numpy as np
import matplotlib.pyplot as plt
# About: solves the 2-D viscous Burgers' equations
# i.e. : u_t + u*u_x + v*u_y = nu*(u_xx + u_yy), and likewise for v
# simulation parameters
lx = 2.0
ly = 2.0
nx = 80
ny = 80
dx = lx/nx
dy = ly/ny
nt = 100
nu = 0.01
sigma = 0.05
dt = sigma*dx*dy/nu
cellx = dx/2+dx * np.arange(nx) # coordinate of cell center
celly = dy/2+dy * np.arange(ny)
cellxx, cellyy = np.meshgrid(cellx,celly)
# initial condition
u = np.ones((nx,ny))
v = np.ones((nx,ny))
#u[0.5/dx:1/dx+1, 0.5/dy:1/dy+1]=2
#v[0.5/dx:1/dx+1, 0.5/dy:1/dy+1]=2
u = np.sin(np.pi*cellxx) * np.sin(np.pi*cellyy)
v = np.sin(np.pi*cellxx) * np.sin(np.pi*cellyy)
#
un = np.ones((nx,ny))
vn = np.ones((nx,ny))
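# Discretization note (sketch of what the loop below computes): backward
# differences for the convective terms and central differences for the
# diffusive terms,
#   u[i,j] <- u[i,j] - u[i,j]*dt/dx*(u[i,j]-u[i-1,j]) - v[i,j]*dt/dy*(u[i,j]-u[i,j-1])
#             + nu*dt/dx**2*(u[i+1,j]-2*u[i,j]+u[i-1,j]) + nu*dt/dy**2*(u[i,j+1]-2*u[i,j]+u[i,j-1])
# and analogously for v. The range(-1, n-1) loops rely on negative indexing,
# which effectively wraps the boundary values around.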
for n in range(nt):
for i in range(-1,nx-1):
for j in range(-1,ny-1):
            un[i,j] = u[i,j] - (u[i,j]*dt/dx*(u[i,j] - u[i-1,j])) - (v[i,j]*dt/dy*(u[i,j]-u[i,j-1])) \
                      + dt*nu/dx**2*(-2*u[i,j] + u[i+1,j] + u[i-1,j]) + dt*nu/dy**2*(-2*u[i,j] + u[i,j-1] + u[i,j+1])
            # the v update mirrors the u update: the convective coefficients are (u, v)
            # and the diffusion acts on v itself
            vn[i,j] = v[i,j] - (u[i,j]*dt/dx*(v[i,j] - v[i-1,j])) - (v[i,j]*dt/dy*(v[i,j]-v[i,j-1])) \
                      + dt*nu/dx**2*(-2*v[i,j] + v[i+1,j] + v[i-1,j]) + dt*nu/dy**2*(-2*v[i,j] + v[i,j-1] + v[i,j+1])
un,u = u,un
vn,v = v,vn
if n%500 == 0:
plt.clf()
plt.pcolormesh(cellxx,cellyy, u)
plt.axis('equal')
plt.title('%03d'%n)
plt.savefig('%03d.png'%n)
|
gpl-2.0
|
drewabbot/kaggle-seizure-prediction
|
phil/utils.py
|
1
|
1218
|
from numpy import power
from sklearn.preprocessing import MinMaxScaler as MMS
def kpca_preprocess_features(X):
mms = MMS(feature_range=(-0.5, 0.5))
n_rows, n_cols = X.shape
for j in range(n_cols):
col_max = max(X[:,j])
col_min = min(X[:,j])
if col_max <= 1.0 and col_min >= -1.0:
pass
else:
X[:,j] = power(X[:,j],0.125)
X = mms.fit_transform(X)
return X
def get_skip_interval(X):
D1_len = 1135680 + 56784 + 1187732
acceptable_num_samples = D1_len / 100
skip_interval = len(X)/acceptable_num_samples
return skip_interval
from numpy import zeros
import gc
def kpca_incremental_transform(kpca, X):
increment = 10000
X_out = zeros((len(X),kpca.n_components))
n_increments = len(X)/increment + 1
for i in range(n_increments):
inc_slice = slice(increment*i,increment*(i+1))
if len(X[inc_slice]) > 0:
X_out[inc_slice,:] = kpca.transform(X[inc_slice])
del X; gc.collect()
return X_out
from numpy import linspace
from sklearn.metrics import roc_auc_score
def tr(lo,hi,n):
return 10.**linspace(lo,hi,n)
def scorer(estimator, X, Y):
preds = estimator.predict_proba(X)[:,1]
return roc_auc_score(Y, preds)
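# Minimal usage sketch (illustrative; `kpca` and `X_raw` are hypothetical names
# for a fitted sklearn.decomposition.KernelPCA instance and a 2-D feature
# array, neither is defined in this file):
#
#   X_scaled = kpca_preprocess_features(X_raw)              # tame heavy-tailed columns, rescale to [-0.5, 0.5]
#   X_fit = X_scaled[::get_skip_interval(X_scaled)]         # subsample rows before fitting the kernel PCA
#   X_reduced = kpca_incremental_transform(kpca, X_scaled)  # transform in 10000-row chunks to bound memory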
|
mit
|
hainm/statsmodels
|
tools/examples_rst.py
|
30
|
5894
|
#! /usr/bin/env python
import os
import sys
import re
import subprocess
import pickle
from StringIO import StringIO
# 3rd party
from matplotlib import pyplot as plt
# Ours
import hash_funcs
#----------------------------------------------------
# Globals
#----------------------------------------------------
# these files do not get made into .rst files because of
# some problems, they may need a simple cleaning up
exclude_list = ['run_all.py',
# these need to be cleaned up
'example_ols_tftest.py',
'example_glsar.py',
'example_ols_table.py',
#not finished yet
'example_arima.py',
'try_wls.py']
file_path = os.path.dirname(__file__)
docs_rst_dir = os.path.realpath(os.path.join(file_path,
'../docs/source/examples/generated/'))
example_dir = os.path.realpath(os.path.join(file_path,
'../examples/'))
def check_script(filename):
"""
    Run a single example file in a subprocess. Return False (and print the
    error) if running it fails, True otherwise.
"""
file_to_run = "python -c\"import warnings; "
file_to_run += "warnings.simplefilter('ignore'); "
file_to_run += "from matplotlib import use; use('Agg'); "
file_to_run += "execfile(r'%s')\"" % os.path.join(example_dir, filename)
proc = subprocess.Popen(file_to_run, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
#NOTE: use communicate to wait for process termination
stdout, stderr = proc.communicate()
result = proc.returncode
if result != 0: # raised an error
msg = "Not generating reST from %s. An error occurred.\n" % filename
msg += stderr
print msg
return False
return True
def parse_docstring(block):
"""
Strips the docstring from a string representation of the file.
Returns the docstring and block without it
"""
ds = "\"{3}|'{3}"
try:
start = re.search(ds, block).end()
end = re.search(ds, block[start:]).start()
except: #TODO: make more informative
raise IOError("File %s does not have a docstring?")
docstring = block[start:start+end]
block = block[start+end+3:]
return docstring.strip(), block
def parse_file(block):
"""
Block is a raw string file.
"""
docstring, block = parse_docstring(block)
# just get the first line from the docstring
docstring = docstring.split('\n')[0] or docstring.split('\n')[1]
outfile = [docstring,'='*len(docstring),'']
block = block.split('\n')
# iterate through the rest of block, anything in comments is stripped of #
# anything else is fair game to go in an ipython directive
code_snippet = False
for line in block:
#if not len(line):
# continue
# preserve blank lines
if line.startswith('#') and not (line.startswith('#%') or
line.startswith('#@')):
# on some ReST text
if code_snippet: # were on a code snippet
outfile.append('')
code_snippet = False
line = line.strip()
# try to remove lines like # hello -> #hello
line = re.sub("(?<=#) (?!\s)", "", line)
# make sure commented out things have a space
line = re.sub("#\.\.(?!\s)", "#.. ", line)
line = re.sub("^#+", "", line) # strip multiple hashes
outfile.append(line)
else:
if not code_snippet: # new code block
outfile.append('\n.. ipython:: python\n')
code_snippet = True
# handle decorators and magic functions
if line.startswith('#%') or line.startswith('#@'):
line = line[1:]
outfile.append(' '+line.strip('\n'))
return '\n'.join(outfile)
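# Rough illustration of the mapping implemented by parse_file (hypothetical
# input lines on the left, emitted reST on the right):
#   '# Fit the model'    -> 'Fit the model'        (plain reST text, '#' stripped)
#   'res = model.fit()'  -> ' res = model.fit()'   (placed under a '.. ipython:: python' directive)
#   '#%timeit res'       -> ' %timeit res'         (magics/decorators kept as code, leading '#' removed)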
def write_file(outfile, rst_file_pth):
"""
Write outfile to rst_file_pth
"""
print "Writing ", os.path.basename(rst_file_pth)
write_file = open(rst_file_pth, 'w')
write_file.writelines(outfile)
write_file.close()
def restify(example_file, filehash, fname):
"""
    Takes a whole file, i.e., the result of file.read(), its md5 hash, and
the filename
Parse the file
Write the new .rst
Update the hash_dict
"""
write_filename = os.path.join(docs_rst_dir, fname[:-2] + 'rst')
try:
rst_file = parse_file(example_file)
except IOError as err:
raise IOError(err.message % fname)
write_file(rst_file, write_filename)
if filehash is not None:
hash_funcs.update_hash_dict(filehash, fname)
if __name__ == "__main__":
sys.path.insert(0, example_dir)
from run_all import filelist
sys.path.remove(example_dir)
if not os.path.exists(docs_rst_dir):
os.makedirs(docs_rst_dir)
if len(sys.argv) > 1: # given a file,files to process, no help flag yet
for example_file in sys.argv[1:]:
whole_file = open(example_file, 'r').read()
restify(whole_file, None, example_file)
else: # process the whole directory
for root, dirnames, filenames in os.walk(example_dir):
if 'notebooks' in root:
continue
for example in filenames:
example_file = os.path.join(root, example)
whole_file = open(example_file, 'r').read()
to_write, filehash = hash_funcs.check_hash(whole_file,
example)
if not to_write:
print "Hash has not changed for file %s" % example
continue
elif (not example.endswith('.py') or example in exclude_list or
not check_script(example_file)):
continue
restify(whole_file, filehash, example)
|
bsd-3-clause
|
yinpatt/thinkstats
|
code/regression.py
|
62
|
9652
|
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import pandas
import random
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import re
import chap01soln
import first
import linear
import thinkplot
import thinkstats2
def QuickLeastSquares(xs, ys):
"""Estimates linear least squares fit and returns MSE.
xs: sequence of values
ys: sequence of values
returns: inter, slope, mse
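    Example (illustrative; the exact values follow from the least-squares
    formulas below):
    >>> import numpy as np
    >>> inter, slope, mse = QuickLeastSquares(np.array([0.0, 1.0, 2.0]),
    ...                                       np.array([1.0, 3.0, 5.0]))
    >>> print(inter, slope, mse)
    1.0 2.0 0.0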
"""
n = float(len(xs))
meanx = xs.mean()
dxs = xs - meanx
varx = np.dot(dxs, dxs) / n
meany = ys.mean()
dys = ys - meany
cov = np.dot(dxs, dys) / n
slope = cov / varx
inter = meany - slope * meanx
res = ys - (inter + slope * xs)
mse = np.dot(res, res) / n
return inter, slope, mse
def ReadVariables():
"""Reads Stata dictionary files for NSFG data.
returns: DataFrame that maps variables names to descriptions
"""
vars1 = thinkstats2.ReadStataDct('2002FemPreg.dct').variables
vars2 = thinkstats2.ReadStataDct('2002FemResp.dct').variables
all_vars = vars1.append(vars2)
all_vars.index = all_vars.name
return all_vars
def JoinFemResp(df):
"""Reads the female respondent file and joins on caseid.
df: DataFrame
"""
resp = chap01soln.ReadFemResp()
resp.index = resp.caseid
join = df.join(resp, on='caseid', rsuffix='_r')
# convert from colon-separated time strings to datetimes
join.screentime = pandas.to_datetime(join.screentime)
return join
def GoMining(df):
"""Searches for variables that predict birth weight.
df: DataFrame of pregnancy records
returns: list of (rsquared, variable name) pairs
"""
variables = []
for name in df.columns:
try:
if df[name].var() < 1e-7:
continue
formula = 'totalwgt_lb ~ agepreg + ' + name
formula = formula.encode('ascii')
model = smf.ols(formula, data=df)
if model.nobs < len(df)/2:
continue
results = model.fit()
except (ValueError, TypeError):
continue
variables.append((results.rsquared, name))
return variables
def MiningReport(variables, n=30):
"""Prints variables with the highest R^2.
    variables: list of (R^2, variable name) pairs
n: number of pairs to print
"""
all_vars = ReadVariables()
variables.sort(reverse=True)
for mse, name in variables[:n]:
key = re.sub('_r$', '', name)
try:
desc = all_vars.loc[key].desc
if isinstance(desc, pandas.Series):
desc = desc[0]
print(name, mse, desc)
except KeyError:
print(name, mse)
def PredictBirthWeight(live):
"""Predicts birth weight of a baby at 30 weeks.
live: DataFrame of live births
"""
live = live[live.prglngth>30]
join = JoinFemResp(live)
t = GoMining(join)
MiningReport(t)
formula = ('totalwgt_lb ~ agepreg + C(race) + babysex==1 + '
'nbrnaliv>1 + paydu==1 + totincr')
results = smf.ols(formula, data=join).fit()
SummarizeResults(results)
def SummarizeResults(results):
"""Prints the most important parts of linear regression results:
results: RegressionResults object
"""
for name, param in results.params.iteritems():
pvalue = results.pvalues[name]
print('%s %0.3g (%.3g)' % (name, param, pvalue))
try:
print('R^2 %.4g' % results.rsquared)
ys = results.model.endog
print('Std(ys) %.4g' % ys.std())
print('Std(res) %.4g' % results.resid.std())
except AttributeError:
print('R^2 %.4g' % results.prsquared)
def RunSimpleRegression(live):
    Runs a simple regression and compares results to thinkstats2 functions.
live: DataFrame of live births
"""
# run the regression with thinkstats2 functions
live_dropna = live.dropna(subset=['agepreg', 'totalwgt_lb'])
ages = live_dropna.agepreg
weights = live_dropna.totalwgt_lb
inter, slope = thinkstats2.LeastSquares(ages, weights)
res = thinkstats2.Residuals(ages, weights, inter, slope)
r2 = thinkstats2.CoefDetermination(weights, res)
# run the regression with statsmodels
formula = 'totalwgt_lb ~ agepreg'
model = smf.ols(formula, data=live)
results = model.fit()
SummarizeResults(results)
def AlmostEquals(x, y, tol=1e-6):
return abs(x-y) < tol
assert(AlmostEquals(results.params['Intercept'], inter))
assert(AlmostEquals(results.params['agepreg'], slope))
assert(AlmostEquals(results.rsquared, r2))
def PivotTables(live):
"""Prints a pivot table comparing first babies to others.
live: DataFrame of live births
"""
table = pandas.pivot_table(live, rows='isfirst',
values=['totalwgt_lb', 'agepreg'])
print(table)
def FormatRow(results, columns):
"""Converts regression results to a string.
results: RegressionResults object
returns: string
"""
t = []
for col in columns:
coef = results.params.get(col, np.nan)
pval = results.pvalues.get(col, np.nan)
if np.isnan(coef):
s = '--'
elif pval < 0.001:
s = '%0.3g (*)' % (coef)
else:
s = '%0.3g (%0.2g)' % (coef, pval)
t.append(s)
try:
t.append('%.2g' % results.rsquared)
except AttributeError:
t.append('%.2g' % results.prsquared)
return t
def RunModels(live):
"""Runs regressions that predict birth weight.
live: DataFrame of pregnancy records
"""
columns = ['isfirst[T.True]', 'agepreg', 'agepreg2']
header = ['isfirst', 'agepreg', 'agepreg2']
rows = []
formula = 'totalwgt_lb ~ isfirst'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ isfirst + agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
live['agepreg2'] = live.agepreg**2
formula = 'totalwgt_lb ~ isfirst + agepreg + agepreg2'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
PrintTabular(rows, header)
def PrintTabular(rows, header):
"""Prints results in LaTeX tabular format.
rows: list of rows
header: list of strings
"""
s = r'\hline ' + ' & '.join(header) + r' \\ \hline'
print(s)
for row in rows:
s = ' & '.join(row) + r' \\'
print(s)
print(r'\hline')
def LogisticRegressionExample():
"""Runs a simple example of logistic regression and prints results.
"""
y = np.array([0, 1, 0, 1])
x1 = np.array([0, 0, 0, 1])
x2 = np.array([0, 1, 1, 1])
beta = [-1.5, 2.8, 1.1]
log_o = beta[0] + beta[1] * x1 + beta[2] * x2
print(log_o)
o = np.exp(log_o)
print(o)
p = o / (o+1)
print(p)
like = y * p + (1-y) * (1-p)
print(like)
print(np.prod(like))
df = pandas.DataFrame(dict(y=y, x1=x1, x2=x2))
results = smf.logit('y ~ x1 + x2', data=df).fit()
print(results.summary())
def RunLogisticModels(live):
"""Runs regressions that predict sex.
live: DataFrame of pregnancy records
"""
#live = linear.ResampleRowsWeighted(live)
df = live[live.prglngth>30]
df['boy'] = (df.babysex==1).astype(int)
df['isyoung'] = (df.agepreg<20).astype(int)
df['isold'] = (df.agepreg<35).astype(int)
df['season'] = (((df.datend+1) % 12) / 3).astype(int)
# run the simple model
model = smf.logit('boy ~ agepreg', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# run the complex model
model = smf.logit('boy ~ agepreg + hpagelb + birthord + C(race)', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# make the scatter plot
exog = pandas.DataFrame(model.exog, columns=model.exog_names)
endog = pandas.DataFrame(model.endog, columns=[model.endog_names])
xs = exog['agepreg']
lo = results.fittedvalues
o = np.exp(lo)
p = o / (o+1)
#thinkplot.Scatter(xs, p, alpha=0.1)
#thinkplot.Show()
# compute accuracy
actual = endog['boy']
baseline = actual.mean()
predict = (results.predict() >= 0.5)
true_pos = predict * actual
true_neg = (1 - predict) * (1 - actual)
acc = (sum(true_pos) + sum(true_neg)) / len(actual)
print(acc, baseline)
columns = ['agepreg', 'hpagelb', 'birthord', 'race']
new = pandas.DataFrame([[35, 39, 3, 1]], columns=columns)
y = results.predict(new)
print(y)
def main(name, data_dir='.'):
thinkstats2.RandomSeed(17)
LogisticRegressionExample()
live, firsts, others = first.MakeFrames()
live['isfirst'] = (live.birthord == 1)
RunLogisticModels(live)
RunSimpleRegression(live)
RunModels(live)
PredictBirthWeight(live)
if __name__ == '__main__':
import sys
main(*sys.argv)
|
gpl-3.0
|
equialgo/scikit-learn
|
examples/applications/plot_species_distribution_modeling.py
|
55
|
7386
|
"""
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
|
bsd-3-clause
|
waynenilsen/statsmodels
|
examples/python/tsa_dates.py
|
29
|
1169
|
## Dates in timeseries models
from __future__ import print_function
import statsmodels.api as sm
import pandas as pd
# ## Getting started
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
# ## Using Pandas
#
# Make a pandas TimeSeries or DataFrame
endog = pd.TimeSeries(data.endog, index=dates)
# Instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# ## Using explicit dates
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# Note: This attribute only exists if predict has been called. It holds the dates associated with the last call to predict.
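# A hedged addition, not in the original notebook: when the model is built from
# a pandas Series as above, ``predict`` should itself come back as a Series, so
# the forecast dates would simply be its index, e.g.
#
#   print(pandas_ar_res.predict(start='2005', end='2015').index)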
|
bsd-3-clause
|
jlegendary/scikit-learn
|
examples/plot_multioutput_face_completion.py
|
330
|
3019
|
"""
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
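# A hedged follow-up sketch, not part of the original example: a quick numeric
# comparison of the estimators on the five sampled test faces via the mean
# squared error of the predicted lower halves. Defined but not called, so the
# plotting example itself is unchanged.
def _report_mse():
    from sklearn.metrics import mean_squared_error
    for name in sorted(ESTIMATORS):
        mse = mean_squared_error(y_test, y_test_predict[name])
        print("%-18s MSE = %.4f" % (name, mse))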
|
bsd-3-clause
|
ben-hopps/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/image.py
|
69
|
28764
|
"""
The image module supports basic image loading, rescaling and display
operations.
"""
from __future__ import division
import os, warnings
import numpy as np
from numpy import ma
from matplotlib import rcParams
from matplotlib import artist as martist
from matplotlib import colors as mcolors
from matplotlib import cm
# For clarity, names from _image are given explicitly in this module:
from matplotlib import _image
from matplotlib import _png
# For user convenience, the names from _image are also imported into
# the image namespace:
from matplotlib._image import *
class AxesImage(martist.Artist, cm.ScalarMappable):
zorder = 1
# map interpolation strings to module constants
_interpd = {
'nearest' : _image.NEAREST,
'bilinear' : _image.BILINEAR,
'bicubic' : _image.BICUBIC,
'spline16' : _image.SPLINE16,
'spline36' : _image.SPLINE36,
'hanning' : _image.HANNING,
'hamming' : _image.HAMMING,
'hermite' : _image.HERMITE,
'kaiser' : _image.KAISER,
'quadric' : _image.QUADRIC,
'catrom' : _image.CATROM,
'gaussian' : _image.GAUSSIAN,
'bessel' : _image.BESSEL,
'mitchell' : _image.MITCHELL,
'sinc' : _image.SINC,
'lanczos' : _image.LANCZOS,
'blackman' : _image.BLACKMAN,
}
# reverse interp dict
_interpdr = dict([ (v,k) for k,v in _interpd.items()])
interpnames = _interpd.keys()
def __str__(self):
return "AxesImage(%g,%g;%gx%g)" % tuple(self.axes.bbox.bounds)
def __init__(self, ax,
cmap = None,
norm = None,
interpolation=None,
origin=None,
extent=None,
filternorm=1,
filterrad=4.0,
resample = False,
**kwargs
):
"""
interpolation and cmap default to their rc settings
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
extent is data axes (left, right, bottom, top) for making image plots
registered with data plots. Default is to label the pixel
centers with the zero-based row and column indices.
Additional kwargs are matplotlib.artist properties
"""
martist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
if origin is None: origin = rcParams['image.origin']
self.origin = origin
self._extent = extent
self.set_filternorm(filternorm)
self.set_filterrad(filterrad)
self._filterrad = filterrad
self.set_interpolation(interpolation)
self.set_resample(resample)
self.axes = ax
self._imcache = None
self.update(kwargs)
def get_size(self):
'Get the numrows, numcols of the input image'
if self._A is None:
raise RuntimeError('You must first set the image array')
return self._A.shape[:2]
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
ACCEPTS: float
"""
martist.Artist.set_alpha(self, alpha)
self._imcache = None
def changed(self):
"""
Call this whenever the mappable is changed so observers can
update state
"""
self._imcache = None
self._rgbacache = None
cm.ScalarMappable.changed(self)
def make_image(self, magnification=1.0):
if self._A is None:
raise RuntimeError('You must first set the image array or the image attribute')
xmin, xmax, ymin, ymax = self.get_extent()
dxintv = xmax-xmin
dyintv = ymax-ymin
# the viewport scale factor
sx = dxintv/self.axes.viewLim.width
sy = dyintv/self.axes.viewLim.height
numrows, numcols = self._A.shape[:2]
if sx > 2:
x0 = (self.axes.viewLim.x0-xmin)/dxintv * numcols
ix0 = max(0, int(x0 - self._filterrad))
x1 = (self.axes.viewLim.x1-xmin)/dxintv * numcols
ix1 = min(numcols, int(x1 + self._filterrad))
xslice = slice(ix0, ix1)
xmin_old = xmin
xmin = xmin_old + ix0*dxintv/numcols
xmax = xmin_old + ix1*dxintv/numcols
dxintv = xmax - xmin
sx = dxintv/self.axes.viewLim.width
else:
xslice = slice(0, numcols)
if sy > 2:
y0 = (self.axes.viewLim.y0-ymin)/dyintv * numrows
iy0 = max(0, int(y0 - self._filterrad))
y1 = (self.axes.viewLim.y1-ymin)/dyintv * numrows
iy1 = min(numrows, int(y1 + self._filterrad))
if self.origin == 'upper':
yslice = slice(numrows-iy1, numrows-iy0)
else:
yslice = slice(iy0, iy1)
ymin_old = ymin
ymin = ymin_old + iy0*dyintv/numrows
ymax = ymin_old + iy1*dyintv/numrows
dyintv = ymax - ymin
sy = dyintv/self.axes.viewLim.height
else:
yslice = slice(0, numrows)
if xslice != self._oldxslice or yslice != self._oldyslice:
self._imcache = None
self._oldxslice = xslice
self._oldyslice = yslice
if self._imcache is None:
if self._A.dtype == np.uint8 and len(self._A.shape) == 3:
im = _image.frombyte(self._A[yslice,xslice,:], 0)
im.is_grayscale = False
else:
if self._rgbacache is None:
x = self.to_rgba(self._A, self._alpha)
self._rgbacache = x
else:
x = self._rgbacache
im = _image.fromarray(x[yslice,xslice], 0)
if len(self._A.shape) == 2:
im.is_grayscale = self.cmap.is_gray()
else:
im.is_grayscale = False
self._imcache = im
if self.origin=='upper':
im.flipud_in()
else:
im = self._imcache
fc = self.axes.patch.get_facecolor()
bg = mcolors.colorConverter.to_rgba(fc, 0)
im.set_bg( *bg)
# image input dimensions
im.reset_matrix()
numrows, numcols = im.get_size()
im.set_interpolation(self._interpd[self._interpolation])
im.set_resample(self._resample)
# the viewport translation
tx = (xmin-self.axes.viewLim.x0)/dxintv * numcols
ty = (ymin-self.axes.viewLim.y0)/dyintv * numrows
l, b, r, t = self.axes.bbox.extents
widthDisplay = (round(r) + 0.5) - (round(l) - 0.5)
heightDisplay = (round(t) + 0.5) - (round(b) - 0.5)
widthDisplay *= magnification
heightDisplay *= magnification
im.apply_translation(tx, ty)
# resize viewport to display
rx = widthDisplay / numcols
ry = heightDisplay / numrows
im.apply_scaling(rx*sx, ry*sy)
im.resize(int(widthDisplay+0.5), int(heightDisplay+0.5),
norm=self._filternorm, radius=self._filterrad)
return im
def draw(self, renderer, *args, **kwargs):
if not self.get_visible(): return
if (self.axes.get_xscale() != 'linear' or
self.axes.get_yscale() != 'linear'):
warnings.warn("Images are not supported on non-linear axes.")
im = self.make_image(renderer.get_image_magnification())
im._url = self.get_url()
l, b, widthDisplay, heightDisplay = self.axes.bbox.bounds
clippath, affine = self.get_transformed_clip_path_and_affine()
renderer.draw_image(round(l), round(b), im, self.axes.bbox.frozen(),
clippath, affine)
def contains(self, mouseevent):
"""Test whether the mouse event occured within the image.
"""
if callable(self._contains): return self._contains(self,mouseevent)
# TODO: make sure this is consistent with patch and patch
# collection on nonlinear transformed coordinates.
# TODO: consider returning image coordinates (shouldn't
        # be too difficult given that the image is rectilinear)
x, y = mouseevent.xdata, mouseevent.ydata
xmin, xmax, ymin, ymax = self.get_extent()
if xmin > xmax:
xmin,xmax = xmax,xmin
if ymin > ymax:
ymin,ymax = ymax,ymin
#print x, y, xmin, xmax, ymin, ymax
if x is not None and y is not None:
inside = x>=xmin and x<=xmax and y>=ymin and y<=ymax
else:
inside = False
return inside,{}
def write_png(self, fname, noscale=False):
"""Write the image to png file with fname"""
im = self.make_image()
if noscale:
numrows, numcols = im.get_size()
im.reset_matrix()
im.set_interpolation(0)
im.resize(numcols, numrows)
im.flipud_out()
rows, cols, buffer = im.as_rgba_str()
_png.write_png(buffer, cols, rows, fname)
def set_data(self, A, shape=None):
"""
Set the image array
ACCEPTS: numpy/PIL Image A"""
# check if data is PIL Image without importing Image
if hasattr(A,'getpixel'):
self._A = pil_to_array(A)
elif ma.isMA(A):
self._A = A
else:
self._A = np.asarray(A) # assume array
if self._A.dtype != np.uint8 and not np.can_cast(self._A.dtype, np.float):
raise TypeError("Image data can not convert to float")
if (self._A.ndim not in (2, 3) or
(self._A.ndim == 3 and self._A.shape[-1] not in (3, 4))):
raise TypeError("Invalid dimensions for image data")
        self._imcache = None
self._rgbacache = None
self._oldxslice = None
self._oldyslice = None
def set_array(self, A):
"""
retained for backwards compatibility - use set_data instead
ACCEPTS: numpy array A or PIL Image"""
# This also needs to be here to override the inherited
# cm.ScalarMappable.set_array method so it is not invoked
# by mistake.
self.set_data(A)
def set_extent(self, extent):
"""extent is data axes (left, right, bottom, top) for making image plots
"""
self._extent = extent
xmin, xmax, ymin, ymax = extent
corners = (xmin, ymin), (xmax, ymax)
self.axes.update_datalim(corners)
if self.axes._autoscaleon:
self.axes.set_xlim((xmin, xmax))
self.axes.set_ylim((ymin, ymax))
def get_interpolation(self):
"""
Return the interpolation method the image uses when resizing.
One of 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning',
'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
        'bessel', 'mitchell', 'sinc', 'lanczos', or 'blackman'.
"""
return self._interpolation
def set_interpolation(self, s):
"""
Set the interpolation method the image uses when resizing.
ACCEPTS: ['nearest' | 'bilinear' | 'bicubic' | 'spline16' |
'spline36' | 'hanning' | 'hamming' | 'hermite' | 'kaiser' |
'quadric' | 'catrom' | 'gaussian' | 'bessel' | 'mitchell' |
          'sinc' | 'lanczos' | 'blackman' | None ]
"""
if s is None: s = rcParams['image.interpolation']
s = s.lower()
if s not in self._interpd:
raise ValueError('Illegal interpolation string')
self._interpolation = s
def set_resample(self, v):
if v is None: v = rcParams['image.resample']
self._resample = v
    def get_resample(self):
        'return the resample setting'
        return self._resample
def get_extent(self):
'get the image extent: left, right, bottom, top'
if self._extent is not None:
return self._extent
else:
sz = self.get_size()
#print 'sz', sz
numrows, numcols = sz
if self.origin == 'upper':
return (-0.5, numcols-0.5, numrows-0.5, -0.5)
else:
return (-0.5, numcols-0.5, -0.5, numrows-0.5)
def set_filternorm(self, filternorm):
"""Set whether the resize filter norms the weights -- see
help for imshow
ACCEPTS: 0 or 1
"""
if filternorm:
self._filternorm = 1
else:
self._filternorm = 0
def get_filternorm(self):
'return the filternorm setting'
return self._filternorm
def set_filterrad(self, filterrad):
"""Set the resize filter radius only applicable to some
interpolation schemes -- see help for imshow
ACCEPTS: positive float
"""
r = float(filterrad)
assert(r>0)
self._filterrad = r
def get_filterrad(self):
'return the filterrad setting'
return self._filterrad
class NonUniformImage(AxesImage):
def __init__(self, ax,
**kwargs
):
interp = kwargs.pop('interpolation', 'nearest')
AxesImage.__init__(self, ax,
**kwargs)
AxesImage.set_interpolation(self, interp)
def make_image(self, magnification=1.0):
if self._A is None:
raise RuntimeError('You must first set the image array')
x0, y0, v_width, v_height = self.axes.viewLim.bounds
l, b, r, t = self.axes.bbox.extents
width = (round(r) + 0.5) - (round(l) - 0.5)
height = (round(t) + 0.5) - (round(b) - 0.5)
width *= magnification
height *= magnification
im = _image.pcolor(self._Ax, self._Ay, self._A,
height, width,
(x0, x0+v_width, y0, y0+v_height),
self._interpd[self._interpolation])
fc = self.axes.patch.get_facecolor()
bg = mcolors.colorConverter.to_rgba(fc, 0)
im.set_bg(*bg)
im.is_grayscale = self.is_grayscale
return im
def set_data(self, x, y, A):
x = np.asarray(x,np.float32)
y = np.asarray(y,np.float32)
if not ma.isMA(A):
A = np.asarray(A)
if len(x.shape) != 1 or len(y.shape) != 1\
or A.shape[0:2] != (y.shape[0], x.shape[0]):
raise TypeError("Axes don't match array shape")
if len(A.shape) not in [2, 3]:
raise TypeError("Can only plot 2D or 3D data")
if len(A.shape) == 3 and A.shape[2] not in [1, 3, 4]:
raise TypeError("3D arrays must have three (RGB) or four (RGBA) color components")
if len(A.shape) == 3 and A.shape[2] == 1:
A.shape = A.shape[0:2]
if len(A.shape) == 2:
if A.dtype != np.uint8:
A = (self.cmap(self.norm(A))*255).astype(np.uint8)
self.is_grayscale = self.cmap.is_gray()
else:
A = np.repeat(A[:,:,np.newaxis], 4, 2)
A[:,:,3] = 255
self.is_grayscale = True
else:
if A.dtype != np.uint8:
A = (255*A).astype(np.uint8)
if A.shape[2] == 3:
                B = np.zeros(tuple(list(A.shape[0:2]) + [4]), np.uint8)
B[:,:,0:3] = A
B[:,:,3] = 255
A = B
self.is_grayscale = False
self._A = A
self._Ax = x
self._Ay = y
self._imcache = None
def set_array(self, *args):
raise NotImplementedError('Method not supported')
def set_interpolation(self, s):
        if s is not None and s not in ('nearest', 'bilinear'):
raise NotImplementedError('Only nearest neighbor and bilinear interpolations are supported')
AxesImage.set_interpolation(self, s)
def get_extent(self):
if self._A is None:
raise RuntimeError('Must set data first')
return self._Ax[0], self._Ax[-1], self._Ay[0], self._Ay[-1]
def set_filternorm(self, s):
pass
def set_filterrad(self, s):
pass
def set_norm(self, norm):
if self._A is not None:
raise RuntimeError('Cannot change colors after loading data')
cm.ScalarMappable.set_norm(self, norm)
def set_cmap(self, cmap):
if self._A is not None:
raise RuntimeError('Cannot change colors after loading data')
        cm.ScalarMappable.set_cmap(self, cmap)
class PcolorImage(martist.Artist, cm.ScalarMappable):
'''
Make a pcolor-style plot with an irregular rectangular grid.
This uses a variation of the original irregular image code,
and it is used by pcolorfast for the corresponding grid type.
'''
def __init__(self, ax,
x=None,
y=None,
A=None,
cmap = None,
norm = None,
**kwargs
):
"""
cmap defaults to its rc setting
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
Additional kwargs are matplotlib.artist properties
"""
martist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
self.axes = ax
self._rgbacache = None
self.update(kwargs)
self.set_data(x, y, A)
def make_image(self, magnification=1.0):
if self._A is None:
raise RuntimeError('You must first set the image array')
fc = self.axes.patch.get_facecolor()
bg = mcolors.colorConverter.to_rgba(fc, 0)
bg = (np.array(bg)*255).astype(np.uint8)
l, b, r, t = self.axes.bbox.extents
width = (round(r) + 0.5) - (round(l) - 0.5)
height = (round(t) + 0.5) - (round(b) - 0.5)
width = width * magnification
height = height * magnification
if self.check_update('array'):
A = self.to_rgba(self._A, alpha=self._alpha, bytes=True)
self._rgbacache = A
if self._A.ndim == 2:
self.is_grayscale = self.cmap.is_gray()
else:
A = self._rgbacache
vl = self.axes.viewLim
im = _image.pcolor2(self._Ax, self._Ay, A,
height,
width,
(vl.x0, vl.x1, vl.y0, vl.y1),
bg)
im.is_grayscale = self.is_grayscale
return im
def draw(self, renderer, *args, **kwargs):
if not self.get_visible(): return
im = self.make_image(renderer.get_image_magnification())
renderer.draw_image(round(self.axes.bbox.xmin),
round(self.axes.bbox.ymin),
im,
self.axes.bbox.frozen(),
*self.get_transformed_clip_path_and_affine())
def set_data(self, x, y, A):
if not ma.isMA(A):
A = np.asarray(A)
if x is None:
x = np.arange(0, A.shape[1]+1, dtype=np.float64)
else:
x = np.asarray(x, np.float64).ravel()
if y is None:
y = np.arange(0, A.shape[0]+1, dtype=np.float64)
else:
y = np.asarray(y, np.float64).ravel()
if A.shape[:2] != (y.size-1, x.size-1):
            raise ValueError(
                "Axes don't match array shape: A is %s, y.size is %d, "
                "x.size is %d" % (str(A.shape), y.size, x.size))
if A.ndim not in [2, 3]:
raise ValueError("A must be 2D or 3D")
if A.ndim == 3 and A.shape[2] == 1:
A.shape = A.shape[:2]
self.is_grayscale = False
if A.ndim == 3:
if A.shape[2] in [3, 4]:
if (A[:,:,0] == A[:,:,1]).all() and (A[:,:,0] == A[:,:,2]).all():
self.is_grayscale = True
else:
raise ValueError("3D arrays must have RGB or RGBA as last dim")
self._A = A
self._Ax = x
self._Ay = y
self.update_dict['array'] = True
def set_array(self, *args):
raise NotImplementedError('Method not supported')
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
ACCEPTS: float
"""
martist.Artist.set_alpha(self, alpha)
self.update_dict['array'] = True
class FigureImage(martist.Artist, cm.ScalarMappable):
zorder = 1
def __init__(self, fig,
cmap = None,
norm = None,
offsetx = 0,
offsety = 0,
origin=None,
**kwargs
):
"""
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
kwargs are an optional list of Artist keyword args
"""
martist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
if origin is None: origin = rcParams['image.origin']
self.origin = origin
self.figure = fig
self.ox = offsetx
self.oy = offsety
self.update(kwargs)
self.magnification = 1.0
def contains(self, mouseevent):
"""Test whether the mouse event occured within the image.
"""
if callable(self._contains): return self._contains(self,mouseevent)
xmin, xmax, ymin, ymax = self.get_extent()
xdata, ydata = mouseevent.x, mouseevent.y
#print xdata, ydata, xmin, xmax, ymin, ymax
if xdata is not None and ydata is not None:
inside = xdata>=xmin and xdata<=xmax and ydata>=ymin and ydata<=ymax
else:
inside = False
return inside,{}
def get_size(self):
'Get the numrows, numcols of the input image'
if self._A is None:
raise RuntimeError('You must first set the image array')
return self._A.shape[:2]
def get_extent(self):
'get the image extent: left, right, bottom, top'
numrows, numcols = self.get_size()
return (-0.5+self.ox, numcols-0.5+self.ox,
-0.5+self.oy, numrows-0.5+self.oy)
def make_image(self, magnification=1.0):
if self._A is None:
raise RuntimeError('You must first set the image array')
x = self.to_rgba(self._A, self._alpha)
self.magnification = magnification
# if magnification is not one, we need to resize
ismag = magnification!=1
#if ismag: raise RuntimeError
if ismag:
isoutput = 0
else:
isoutput = 1
im = _image.fromarray(x, isoutput)
fc = self.figure.get_facecolor()
im.set_bg( *mcolors.colorConverter.to_rgba(fc, 0) )
im.is_grayscale = (self.cmap.name == "gray" and
len(self._A.shape) == 2)
if ismag:
numrows, numcols = self.get_size()
numrows *= magnification
numcols *= magnification
im.set_interpolation(_image.NEAREST)
im.resize(numcols, numrows)
if self.origin=='upper':
im.flipud_out()
return im
def draw(self, renderer, *args, **kwargs):
if not self.get_visible(): return
# todo: we should be able to do some cacheing here
im = self.make_image(renderer.get_image_magnification())
renderer.draw_image(round(self.ox), round(self.oy), im, self.figure.bbox,
*self.get_transformed_clip_path_and_affine())
def write_png(self, fname):
"""Write the image to png file with fname"""
im = self.make_image()
rows, cols, buffer = im.as_rgba_str()
_png.write_png(buffer, cols, rows, fname)
def imread(fname):
"""
Return image file in *fname* as :class:`numpy.array`.
Return value is a :class:`numpy.array`. For grayscale images, the
return array is MxN. For RGB images, the return value is MxNx3.
For RGBA images the return value is MxNx4.
matplotlib can only read PNGs natively, but if `PIL
<http://www.pythonware.com/products/pil/>`_ is installed, it will
use it to load the image and return an array (if possible) which
can be used with :func:`~matplotlib.pyplot.imshow`.
TODO: support RGB and grayscale return values in _image.readpng
"""
def pilread():
'try to load the image with PIL or return None'
try: import Image
except ImportError: return None
image = Image.open( fname )
return pil_to_array(image)
handlers = {'png' :_png.read_png,
}
basename, ext = os.path.splitext(fname)
ext = ext.lower()[1:]
if ext not in handlers.keys():
im = pilread()
if im is None:
raise ValueError('Only know how to handle extensions: %s; with PIL installed matplotlib can handle more images' % handlers.keys())
return im
handler = handlers[ext]
return handler(fname)
def pil_to_array( pilImage ):
"""
load a PIL image and return it as a numpy array of uint8. For
grayscale images, the return array is MxN. For RGB images, the
return value is MxNx3. For RGBA images the return value is MxNx4
"""
def toarray(im):
'return a 1D array of floats'
x_str = im.tostring('raw',im.mode,0,-1)
x = np.fromstring(x_str,np.uint8)
return x
if pilImage.mode in ('RGBA', 'RGBX'):
        im = pilImage # RGBA and RGBX images need no conversion
elif pilImage.mode=='L':
        im = pilImage # luminance ('L') images need no conversion
# return MxN luminance array
x = toarray(im)
x.shape = im.size[1], im.size[0]
return x
elif pilImage.mode=='RGB':
#return MxNx3 RGB array
        im = pilImage # RGB images need no conversion
x = toarray(im)
x.shape = im.size[1], im.size[0], 3
return x
else: # try to convert to an rgba image
try:
im = pilImage.convert('RGBA')
except ValueError:
raise RuntimeError('Unknown image mode')
# return MxNx4 RGBA array
x = toarray(im)
x.shape = im.size[1], im.size[0], 4
return x
def thumbnail(infile, thumbfile, scale=0.1, interpolation='bilinear',
preview=False):
"""
make a thumbnail of image in *infile* with output filename
*thumbfile*.
*infile* the image file -- must be PNG or PIL readable if you
have `PIL <http://www.pythonware.com/products/pil/>`_ installed
*thumbfile*
the thumbnail filename
*scale*
the scale factor for the thumbnail
*interpolation*
the interpolation scheme used in the resampling
*preview*
if True, the default backend (presumably a user interface
backend) will be used which will cause a figure to be raised
if :func:`~matplotlib.pyplot.show` is called. If it is False,
a pure image backend will be used depending on the extension,
'png'->FigureCanvasAgg, 'pdf'->FigureCanvasPDF,
'svg'->FigureCanvasSVG
See examples/misc/image_thumbnail.py.
.. htmlonly::
:ref:`misc-image_thumbnail`
Return value is the figure instance containing the thumbnail
"""
basedir, basename = os.path.split(infile)
baseout, extout = os.path.splitext(thumbfile)
im = imread(infile)
rows, cols, depth = im.shape
# this doesn't really matter, it will cancel in the end, but we
# need it for the mpl API
dpi = 100
height = float(rows)/dpi*scale
width = float(cols)/dpi*scale
extension = extout.lower()
if preview:
# let the UI backend do everything
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(width, height), dpi=dpi)
else:
if extension=='.png':
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
elif extension=='.pdf':
from matplotlib.backends.backend_pdf import FigureCanvasPDF as FigureCanvas
elif extension=='.svg':
from matplotlib.backends.backend_svg import FigureCanvasSVG as FigureCanvas
else:
raise ValueError("Can only handle extensions 'png', 'svg' or 'pdf'")
from matplotlib.figure import Figure
fig = Figure(figsize=(width, height), dpi=dpi)
canvas = FigureCanvas(fig)
ax = fig.add_axes([0,0,1,1], aspect='auto', frameon=False, xticks=[], yticks=[])
basename, ext = os.path.splitext(basename)
    ax.imshow(im, aspect='auto', resample=True, interpolation=interpolation)
fig.savefig(thumbfile, dpi=dpi)
return fig
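# A hedged usage sketch, not part of the module. Typical use goes through the
# pyplot wrappers rather than AxesImage directly; 'example.png' is only a
# placeholder filename, not a file shipped with matplotlib:
#
#   import matplotlib.pyplot as plt
#   import matplotlib.image as mimage
#   arr = mimage.imread('example.png')            # RGBA array for a PNG
#   plt.imshow(arr, interpolation='bilinear')     # builds an AxesImage
#   mimage.thumbnail('example.png', 'example_thumb.png', scale=0.1)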
|
agpl-3.0
|
mdegis/machine-learning
|
tools/startup.py
|
7
|
1048
|
#!/usr/bin/python
print
print "checking for nltk"
try:
import nltk
except ImportError:
print "you should install nltk before continuing"
print "checking for numpy"
try:
import numpy
except ImportError:
print "you should install numpy before continuing"
print "checking for sklearn"
try:
import sklearn
except ImportError:
print "you should install sklearn before continuing"
print
print "downloading the Enron dataset (this may take a while)"
print "to check on progress, you can cd up one level, then execute <ls -lthr>"
print "Enron dataset should be last item on the list, along with its current size"
print "download will complete at about 423 MB"
import urllib
url = "https://www.cs.cmu.edu/~./enron/enron_mail_20150507.tgz"
urllib.urlretrieve(url, filename="../enron_mail_20150507.tgz")
print "download complete!"
print
print "unzipping Enron dataset (this may take a while)"
import tarfile
import os
os.chdir("..")
tfile = tarfile.open("enron_mail_20150507.tgz", "r:gz")
tfile.extractall(".")
print "you're ready to go!"
|
gpl-3.0
|
ngoix/OCRF
|
sklearn/ensemble/weight_boosting.py
|
28
|
40740
|
"""Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin, is_regressor
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype,
y_numeric=is_regressor(self))
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float64)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
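# Descriptive note (an added comment, not in the original source): the return
# expression above is the SAMME.R pseudo-probability transform
#     h_k(x) = (K - 1) * (log p_k(x) - (1 / K) * sum_j log p_j(x)),
# i.e. the centred log-probabilities scaled by K - 1, with K = n_classes.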
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
        # SAMME.R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
    estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
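        # Hedged aside (an added comment, not in the original source): on
        # NumPy >= 1.7.0 the CDF/searchsorted sampling above is statistically
        # equivalent to
        #   bootstrap_idx = generator.choice(X.shape[0], size=X.shape[0],
        #                                    replace=True, p=sample_weight)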
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
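    # A tiny worked illustration of the AdaBoost.R2 update above (illustrative
    # numbers only, not from the original source): with learning_rate = 1.0 and a
    # weighted average loss of estimator_error = 0.2,
    #   beta = 0.2 / (1 - 0.2) = 0.25
    #   estimator_weight = log(1 / 0.25) ~= 1.386
    # A sample with normalized error 0 has its weight multiplied by 0.25, while a
    # sample with normalized error 1 keeps its weight, so poorly predicted samples
    # gain relative importance in the next boosting round.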
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
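# ----------------------------------------------------------------------------
# Minimal usage sketch of the regressor defined above. It only assumes
# scikit-learn's own make_regression helper; treat it as an illustration rather
# than part of the shipped module.
if __name__ == '__main__':
    from sklearn.datasets import make_regression

    X_demo, y_demo = make_regression(n_samples=200, n_features=4, noise=1.0,
                                     random_state=0)
    demo_reg = AdaBoostRegressor(n_estimators=50, loss='linear', random_state=0)
    demo_reg.fit(X_demo, y_demo)
    # predict() returns the weighted median over the fitted estimators
    print(demo_reg.predict(X_demo[:3]))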
|
bsd-3-clause
|
caspar/PhysicsLab
|
10_Midterm/pythonprimer.py
|
2
|
1111
|
#Lab 0
# coding=utf-8
import numpy as np;
import matplotlib.pyplot as plt;
data = "SampleData-1.csv";
#load csv
measurement, temperature, pressure, uncertainty, error = np.loadtxt(data, skiprows=5, unpack=True, delimiter=',');
#plot data
# plt.xlabel("Temperature ($^\circ$C)");
# plt.ylabel("Pressure (lb/in$ ^2$)");
#
# plt.errorbar(temperature, pressure, error, linestyle = 'None', marker='d', mfc='yellow', mec='r', ms=20, mew=1, ecolor = "k");
# #plt.show();
# #coupled pendulums
A = 0.1
w1 = 2 * np.pi * 5
w2 = 2 * np.pi * 5.2
theta_a1 = []
theta_b1 = []
theta_a2 = []
theta_b2 = []
times = [];
for t in range (0,400):
theta_a1.append(A * np.cos(w1 * t / 200) + A * np.cos(w2 * t / 200));
theta_b1.append(A * np.cos(w1 * t / 200) - A * np.cos(w2 * t / 200));
theta_a2.append(2 * A * np.cos((w2 - w1) / 2 * t / 200) * np.cos((w2 + w1) / 2 * t / 200));
theta_b2.append(2 * A * np.sin((w2 - w1) / 2 * t / 200) * np.sin((w2 + w1) / 2 * t / 200));
times.append(t)
plt.plot(times, theta_a1);
plt.plot(times, theta_b1);
plt.plot(times, theta_a2);
plt.plot(times, theta_b2);
plt.show();
|
mit
|
jarn0ld/gnuradio
|
gr-fec/python/fec/polar/channel_construction_awgn.py
|
24
|
8560
|
#!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Based on 2 papers:
[1] Ido Tal, Alexander Vardy: 'How To Construct Polar Codes', 2013
for an in-depth description of a widely used algorithm for channel construction.
[2] Harish Vangala, Emanuele Viterbo, Yi Hong: 'A Comparative Study of Polar Code Constructions for the AWGN Channel', 2015
for an overview of different approaches
'''
from scipy.optimize import fsolve
from scipy.special import erfc
from helper_functions import *
from channel_construction_bec import bhattacharyya_bounds
def solver_equation(val, s):
cw_lambda = codeword_lambda_callable(s)
ic_lambda = instantanious_capacity_callable()
return lambda y: ic_lambda(cw_lambda(y)) - val
def solve_capacity(a, s):
eq = solver_equation(a, s)
res = fsolve(eq, 1)
return np.abs(res[0]) # only positive values needed.
def codeword_lambda_callable(s):
return lambda y: np.exp(-2 * y * np.sqrt(2 * s))
def codeword_lambda(y, s):
return codeword_lambda_callable(s)(y)
def instantanious_capacity_callable():
return lambda x : 1 - np.log2(1 + x) + (x * np.log2(x) / (1 + x))
def instantanious_capacity(x):
return instantanious_capacity_callable()(x)
def q_function(x):
# Q(x) = (1 / sqrt(2 * pi) ) * integral (x to inf) exp(- x ^ 2 / 2) dx
return .5 * erfc(x / np.sqrt(2))
def discretize_awgn(mu, design_snr):
'''
needed for Binary-AWGN channels.
in [1] described in Section VI
in [2] described as a function of the same name.
in both cases reduce infinite output alphabet to a finite output alphabet of a given channel.
idea:
1. instantaneous capacity C(x) in interval [0, 1]
2. split into mu intervals.
3. find corresponding output alphabet values y of likelihood ratio function lambda(y) inserted into C(x)
    4. Calculate the probability for each value given that a '0' or a '1' was transmitted.
'''
s = 10 ** (design_snr / 10)
a = np.zeros(mu + 1, dtype=float)
a[-1] = np.inf
for i in range(1, mu):
a[i] = solve_capacity(1. * i / mu, s)
factor = np.sqrt(2 * s)
tpm = np.zeros((2, mu))
for j in range(mu):
tpm[0][j] = q_function(factor + a[j]) - q_function(factor + a[j + 1])
tpm[1][j] = q_function(-1. * factor + a[j]) - q_function(-1. * factor + a[j + 1])
tpm = tpm[::-1]
tpm[0] = tpm[0][::-1]
tpm[1] = tpm[1][::-1]
return tpm
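# Illustrative note (an assumption-labelled sketch, not from the original file):
# discretize_awgn collapses the continuous AWGN output into mu bins and returns a
# 2 x mu array of transition probabilities, one row per transmitted-bit hypothesis.
# A quick shape check could look like
#   tpm = discretize_awgn(mu=16, design_snr=0.0)
#   assert tpm.shape == (2, 16)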
def instant_capacity_delta_callable():
return lambda a, b: -1. * (a + b) * np.log2((a + b) / 2) + a * np.log2(a) + b * np.log2(b)
def capacity_delta_callable():
c = instant_capacity_delta_callable()
return lambda a, b, at, bt: c(a, b) + c(at, bt) - c(a + at, b + bt)
def quantize_to_size(tpm, mu):
# This is a degrading merge, compare [1]
calculate_delta_I = capacity_delta_callable()
L = np.shape(tpm)[1]
if not mu < L:
print('WARNING: This channel gets too small!')
# lambda works on vectors just fine. Use Numpy vector awesomeness.
delta_i_vec = calculate_delta_I(tpm[0, 0:-1], tpm[1, 0:-1], tpm[0, 1:], tpm[1, 1:])
for i in range(L - mu):
d = np.argmin(delta_i_vec)
ap = tpm[0, d] + tpm[0, d + 1]
bp = tpm[1, d] + tpm[1, d + 1]
if d > 0:
delta_i_vec[d - 1] = calculate_delta_I(tpm[0, d - 1], tpm[1, d - 1], ap, bp)
if d < delta_i_vec.size - 1:
delta_i_vec[d + 1] = calculate_delta_I(ap, bp, tpm[0, d + 1], tpm[1, d + 1])
delta_i_vec = np.delete(delta_i_vec, d)
tpm = np.delete(tpm, d, axis=1)
tpm[0, d] = ap
tpm[1, d] = bp
return tpm
def upper_bound_z_params(z, block_size, design_snr):
upper_bound = bhattacharyya_bounds(design_snr, block_size)
z = np.minimum(z, upper_bound)
return z
def tal_vardy_tpm_algorithm(block_size, design_snr, mu):
mu = mu // 2 # make sure algorithm uses only as many bins as specified.
block_power = power_of_2_int(block_size)
channels = np.zeros((block_size, 2, mu))
channels[0] = discretize_awgn(mu, design_snr) * 2
print('Constructing polar code with Tal-Vardy algorithm')
    print('(block_size = {0}, design SNR = {1}, mu = {2})'.format(block_size, design_snr, 2 * mu))
show_progress_bar(0, block_size)
for j in range(0, block_power):
u = 2 ** j
for t in range(u):
show_progress_bar(u + t, block_size)
# print("(u={0}, t={1}) = {2}".format(u, t, u + t))
ch1 = upper_convolve(channels[t], mu)
ch2 = lower_convolve(channels[t], mu)
channels[t] = quantize_to_size(ch1, mu)
channels[u + t] = quantize_to_size(ch2, mu)
z = np.zeros(block_size)
for i in range(block_size):
z[i] = bhattacharyya_parameter(channels[i])
z = z[bit_reverse_vector(np.arange(block_size), block_power)]
z = upper_bound_z_params(z, block_size, design_snr)
show_progress_bar(block_size, block_size)
print('')
print('channel construction DONE')
return z
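# Illustrative note (not part of the original file): z holds one Bhattacharyya
# parameter per synthesized bit channel, and smaller values indicate more reliable
# channels. A common way to use it, assuming K information bits are wanted, is
#   info_positions = np.argsort(z)[:K]     # K most reliable channels
#   frozen_positions = np.argsort(z)[K:]   # remaining channels are frozen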
def merge_lr_based(q, mu):
lrs = q[0] / q[1]
vals, indices, inv_indices = np.unique(lrs, return_index=True, return_inverse=True)
# compare [1] (20). Ordering of representatives according to LRs.
temp = np.zeros((2, len(indices)), dtype=float)
if vals.size < mu:
return q
for i in range(len(indices)):
merge_pos = np.where(inv_indices == i)[0]
sum_items = q[:, merge_pos]
if merge_pos.size > 1:
sum_items = np.sum(q[:, merge_pos], axis=1)
temp[0, i] = sum_items[0]
temp[1, i] = sum_items[1]
return temp
def upper_convolve(tpm, mu):
q = np.zeros((2, mu ** 2))
idx = -1
for i in range(mu):
idx += 1
q[0, idx] = (tpm[0, i] ** 2 + tpm[1, i] ** 2) / 2
q[1, idx] = tpm[0, i] * tpm[1, i]
for j in range(i + 1, mu):
idx += 1
q[0, idx] = tpm[0, i] * tpm[0, j] + tpm[1, i] * tpm[1, j]
q[1, idx] = tpm[0, i] * tpm[1, j] + tpm[1, i] * tpm[0, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
q = merge_lr_based(q, mu)
q = normalize_q(q, tpm)
return q
def lower_convolve(tpm, mu):
q = np.zeros((2, mu * (mu + 1)))
idx = -1
for i in range(0, mu):
idx += 1
q[0, idx] = (tpm[0, i] ** 2) / 2
q[1, idx] = (tpm[1, i] ** 2) / 2
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q[0, idx] = tpm[0, i] * tpm[1, i]
q[1, idx] = q[0, idx]
for j in range(i + 1, mu):
idx += 1
q[0, idx] = tpm[0, i] * tpm[0, j]
q[1, idx] = tpm[1, i] * tpm[1, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q[0, idx] = tpm[0, i] * tpm[1, j]
q[1, idx] = tpm[1, i] * tpm[0, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
q = merge_lr_based(q, mu)
q = normalize_q(q, tpm)
return q
def swap_values(first, second):
return second, first
def normalize_q(q, tpm):
original_factor = np.sum(tpm)
next_factor = np.sum(q)
factor = original_factor / next_factor
return q * factor
def main():
    print('channel construction AWGN main')
n = 8
m = 2 ** n
design_snr = 0.0
mu = 16
z_params = tal_vardy_tpm_algorithm(m, design_snr, mu)
print(z_params)
if 0:
import matplotlib.pyplot as plt
plt.plot(z_params)
plt.show()
if __name__ == '__main__':
main()
|
gpl-3.0
|
SelinaChe/Complex-Object-Detection-StackGAN
|
misc/preprocess_birds.py
|
2
|
3274
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# import tensorflow as tf
import numpy as np
import os
import pickle
from misc.utils import get_image
import scipy.misc
import pandas as pd
# from glob import glob
# TODO: 1. current label is temporary, need to change according to real label
# 2. Currently only split the data into train, need to handle train, test
LR_HR_RETIO = 4
IMSIZE = 256
LOAD_SIZE = int(IMSIZE * 76 / 64)
BIRD_DIR = 'Data/birds'
def load_filenames(data_dir):
filepath = data_dir + 'filenames.pickle'
with open(filepath, 'rb') as f:
filenames = pickle.load(f)
print('Load filenames from: %s (%d)' % (filepath, len(filenames)))
return filenames
def load_bbox(data_dir):
bbox_path = os.path.join(data_dir, 'CUB_200_2011/bounding_boxes.txt')
df_bounding_boxes = pd.read_csv(bbox_path,
delim_whitespace=True,
header=None).astype(int)
#
filepath = os.path.join(data_dir, 'CUB_200_2011/images.txt')
df_filenames = pd.read_csv(filepath, delim_whitespace=True, header=None)
filenames = df_filenames[1].tolist()
print('Total filenames: ', len(filenames), filenames[0])
#
filename_bbox = {img_file[:-4]: [] for img_file in filenames}
numImgs = len(filenames)
for i in xrange(0, numImgs):
# bbox = [x-left, y-top, width, height]
bbox = df_bounding_boxes.iloc[i][1:].tolist()
key = filenames[i][:-4]
filename_bbox[key] = bbox
#
return filename_bbox
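# Illustrative note (structure implied by the code above, example values made up):
# load_bbox maps each image key (file name without '.jpg') to its
# [x-left, y-top, width, height] box, e.g.
#   {'001.Black_footed_Albatross/Black_Footed_Albatross_0046_18': [60, 27, 325, 304], ...}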
def save_data_list(inpath, outpath, filenames, filename_bbox):
hr_images = []
lr_images = []
lr_size = int(LOAD_SIZE / LR_HR_RETIO)
cnt = 0
for key in filenames:
bbox = filename_bbox[key]
f_name = '%s/CUB_200_2011/images/%s.jpg' % (inpath, key)
img = get_image(f_name, LOAD_SIZE, is_crop=True, bbox=bbox)
img = img.astype('uint8')
hr_images.append(img)
lr_img = scipy.misc.imresize(img, [lr_size, lr_size], 'bicubic')
lr_images.append(lr_img)
cnt += 1
if cnt % 100 == 0:
print('Load %d......' % cnt)
#
print('images', len(hr_images), hr_images[0].shape, lr_images[0].shape)
#
outfile = outpath + str(LOAD_SIZE) + 'images.pickle'
with open(outfile, 'wb') as f_out:
pickle.dump(hr_images, f_out)
print('save to: ', outfile)
#
outfile = outpath + str(lr_size) + 'images.pickle'
with open(outfile, 'wb') as f_out:
pickle.dump(lr_images, f_out)
print('save to: ', outfile)
def convert_birds_dataset_pickle(inpath):
# Load dictionary between image filename to its bbox
filename_bbox = load_bbox(inpath)
# ## For Train data
train_dir = os.path.join(inpath, 'train/')
train_filenames = load_filenames(train_dir)
save_data_list(inpath, train_dir, train_filenames, filename_bbox)
# ## For Test data
test_dir = os.path.join(inpath, 'test/')
test_filenames = load_filenames(test_dir)
save_data_list(inpath, test_dir, test_filenames, filename_bbox)
if __name__ == '__main__':
convert_birds_dataset_pickle(BIRD_DIR)
|
mit
|
simonsfoundation/CaImAn
|
setup.py
|
2
|
4076
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import os
from os import path
import sys
import numpy as np
from Cython.Build import cythonize
from setuptools.extension import Extension
from distutils.command.build_ext import build_ext
"""
Installation script for anaconda installers
"""
here = path.abspath(path.dirname(__file__))
with open('README.md', 'r') as rmf:
readme = rmf.read()
with open('VERSION', 'r') as verfile:
version = verfile.read().strip()
############
# This stanza asks for caiman datafiles (demos, movies, ...) to be stashed in "share/caiman", either
# in the system directory if this was installed with a system python, or inside the virtualenv/conda
# environment dir if this was installed with a venv/conda python. This ensures:
# 1) That they're present somewhere on the system if Caiman is installed this way, and
# 2) We can programmatically get at them to manage the user's conda data directory.
#
# We can access these by using sys.prefix as the base of the directory and constructing from there.
# Note that if python's packaging standards ever change the install base of data_files to be under the
# package that made them, we can switch to using the pkg_resources API.
binaries = ['caimanmanager.py']
extra_dirs = ['bin', 'demos', 'docs', 'model']
data_files = [('share/caiman', ['LICENSE.txt', 'README.md', 'test_demos.sh', 'VERSION']),
('share/caiman/example_movies', ['example_movies/data_endoscope.tif', 'example_movies/demoMovie.tif']),
('share/caiman/testdata', ['testdata/groundtruth.npz', 'testdata/example.npz']),
]
for part in extra_dirs:
newpart = [("share/caiman/" + d, [os.path.join(d,f) for f in files]) for d, folders, files in os.walk(part)]
for newcomponent in newpart:
data_files.append(newcomponent)
data_files.append(['bin', binaries])
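# Illustrative note (an assumption, not part of the original script): at runtime the
# files staged above can be located relative to sys.prefix, e.g.
#   caiman_datadir = os.path.join(sys.prefix, 'share', 'caiman')
# caimanmanager.py presumably resolves the directory in a similar way.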
############
# compile with: python setup.py build_ext -i
# clean up with: python setup.py clean --all
if sys.platform == 'darwin':
# see https://github.com/pandas-dev/pandas/issues/23424
extra_compiler_args = ['-stdlib=libc++'] # not needed #, '-mmacosx-version-min=10.9']
else:
extra_compiler_args = []
ext_modules = [Extension("caiman.source_extraction.cnmf.oasis",
sources=["caiman/source_extraction/cnmf/oasis.pyx"],
include_dirs=[np.get_include()],
language="c++",
extra_compile_args = extra_compiler_args,
extra_link_args = extra_compiler_args,
)]
setup(
name='caiman',
version=version,
author='Andrea Giovannucci, Eftychios Pnevmatikakis, Johannes Friedrich, Valentina Staneva, Ben Deverett, Erick Cobos, Jeremie Kalfon',
author_email='[email protected]',
url='https://github.com/flatironinstitute/CaImAn',
license='GPL-2',
description='Advanced algorithms for ROI detection and deconvolution of Calcium Imaging datasets.',
long_description=readme,
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Researchers',
'Topic :: Calcium Imaging :: Analysis Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GPL-2 License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
],
keywords='fluorescence calcium ca imaging deconvolution ROI identification',
packages=find_packages(exclude=['use_cases', 'use_cases.*']),
data_files=data_files,
install_requires=[''],
ext_modules=cythonize(ext_modules),
cmdclass={'build_ext': build_ext}
)
|
gpl-2.0
|
shilad/geo-provenance
|
py/gpinfer.py
|
1
|
4347
|
#!/usr/bin/python -O
import collections
import math
from gputils import *
from country import read_countries
# from geoip import GeoIPFeature
from milgov import MilGovFeature
from pagelang import PagelangsFeature
from whois import FreetextWhoisFeature, ParsedWhoisFeature
from wikidata import WikidataFeature
from tld import TldFeature
class PriorFeature:
def __init__(self, countries=None):
if not countries: countries = read_countries()
self.name = 'prior'
self.prior = {}
for c in countries:
self.prior[c.iso] = c.prior
if len(self.prior) == 0:
raise Exception('no country priors!')
def infer(self, url):
return (0.2, dict(self.prior))
def logit(p):
return math.log(p) - math.log(1 - p)
def prob2sigmoid(p, conf):
conf = conf - 0.0001 # avoid infinities
return logit(p * conf + (1.0 - conf) / 2)
def logistic(x):
return 1.0 / (1 + math.exp(-x))
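# Illustrative note (values approximate, not from the original file): logit and
# logistic are inverses, and prob2sigmoid shrinks a probability toward 0.5 before
# mapping it to the real line, so low-confidence features contribute scores near 0.
#   logistic(logit(0.8))         -> 0.8
#   prob2sigmoid(0.8, conf=1.0)  -> ~1.386  (close to logit(0.8))
#   prob2sigmoid(0.8, conf=0.0)  -> ~0.0    (no confidence, no evidence)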
class LogisticInferrer:
def __init__(self, features=None, intercept=None, coefficients=None):
self.name = 'logistic'
self.reg = None
if not features:
self.features = [
PriorFeature(),
# GeoIPFeature(),
ParsedWhoisFeature(),
FreetextWhoisFeature(),
MilGovFeature(),
WikidataFeature(),
PagelangsFeature(),
TldFeature()
]
self.intercept = -7.06
self.coefficients = [2.38, 5.39, 2.06, 2.87, 2.03, 5.37, 7.03]
else:
if not intercept or not coefficients:
raise GPException("if features are specified, intercept and coefficients must be too.")
self.features = features
self.intercept = intercept
self.coefficients = coefficients
self.countries = read_countries()
def get_feature(self, name):
for f in self.features:
if f.name == name:
return f
return None
def make_rows(self, url_info):
rows = collections.defaultdict(list)
for f in self.features:
(conf, dist) = f.infer(url_info)
if dist:
for c in self.countries:
rows[c.iso].append(dist.get(c.iso, 0.0))
else:
for c in self.countries:
rows[c.iso].append(1.0 / len(self.countries))
return rows
def train(self, data):
from sklearn.linear_model import LogisticRegression
Y = [] # 1 or 0
X = [] # feature vectors
for (urlinfo, actual) in data:
rows = self.make_rows(urlinfo)
for c in self.countries:
Y.append(1 if c.iso == actual else 0)
X.append(rows[c.iso])
self.reg = LogisticRegression()
self.reg.fit(X, Y)
# Y2 = reg.pre(X)
#
# fit_reg = LogisticRegression()
# fit_reg.fit(Y2, Y)
self.intercept = self.reg.intercept_[0]
self.coefficients = self.reg.coef_[0]
def get_equation(self):
eq = '%.2f' % self.reg.intercept_
for (i, f) in enumerate(self.features):
eq += ' + %.2f * %s' % (self.reg.coef_[0][i], f.name)
return eq
def infer(self, url_info):
result = {}
for c in self.countries:
result[c.iso] = self.intercept
for (i, f) in enumerate(self.features):
(conf, dist) = f.infer(url_info)
if conf > 0 and dist:
for c in dist:
c2 = u'gb' if c == u'uk' else c
result[c2] += self.coefficients[i] * dist[c]
else:
for c in result:
result[c] += self.coefficients[i] * 1.0 / len(result)
# the raising to 1.2nd power approximately calibrates
# output probabilities to 85% for correct and 66% for incorrect,
# but does not affect evaluation accuracy
for (c, score) in result.items():
result[c] = logistic(score) ** 1.2
total = sum(result.values())
for (c, prob) in result.items():
result[c] = result[c] / total
return (1.0, result)
if __name__ == '__main__':
inferrer = LogisticInferrer()
inferrer.train(read_gold())
|
apache-2.0
|
apoorva-sharma/deep-frame-interpolation
|
conv_autoencoder_mandrill.py
|
1
|
5711
|
# Adapted from Parag K. Mital, Jan 2016 convolutional_autoencoder.py
import tensorflow as tf
import numpy as np
import math
from libs.activations import lrelu
from libs.utils import corrupt
def autoencoder(input_shape=[None, 16384], # [num_examples, num_pixels]
n_filters=[1, 10, 10, 10], # number of filters in each conv layer
filter_sizes=[3, 3, 3, 3]):
"""Build a deep autoencoder w/ tied weights.
Parameters
----------
input_shape : list, optional
Description
n_filters : list, optional
Description
filter_sizes : list, optional
Description
Returns
-------
x : Tensor
Input placeholder to the network
z : Tensor
Inner-most latent representation
y : Tensor
Output reconstruction of the input
cost : Tensor
Overall cost to use for training
Raises
------
ValueError
Description
"""
# input to the network
x = tf.placeholder(
tf.float32, input_shape, name='x')
# ensure 2-d is converted to square tensor.
if len(x.get_shape()) == 2: # assuming second dim of input_shape is num_pixels of an example
# convert 1D image into 2D and add fourth dimension for num_filters
x_dim = np.sqrt(x.get_shape().as_list()[1]) # assuming each image is square
if x_dim != int(x_dim): # not a square image
raise ValueError('Unsupported input dimensions')
x_dim = int(x_dim)
x_tensor = tf.reshape(
x, [-1, x_dim, x_dim, n_filters[0]]) # reshape input samples to m * 2D image * 1 layer for input
elif len(x.get_shape()) == 4: # assuming we already did that
x_tensor = x
else:
raise ValueError('Unsupported input dimensions')
current_input = x_tensor
# Build the encoder
encoder = []
shapes = []
for layer_i, n_output in enumerate(n_filters[1:]): # enumerate the number of filters in each hidden layer
n_input = current_input.get_shape().as_list()[3] # number of filters in current input
shapes.append(current_input.get_shape().as_list()) # append shape of this layer's input
W = tf.Variable(
tf.random_uniform([
filter_sizes[layer_i],
filter_sizes[layer_i], # a filter_size x filter_size filter
n_input, n_output], # mapping n_inps to n_outs
-1.0 / math.sqrt(n_input),
1.0 / math.sqrt(n_input))) # create Weight mx W_ij = rand([-1,1])
b = tf.Variable(tf.zeros([n_output])) # create Bias vector
encoder.append(W)
output = lrelu( # apply non-linearity
tf.add(tf.nn.conv2d(
current_input, W, strides=[1, 2, 2, 1], padding='SAME'), b)) # add bias to output of conv(inps,W)
current_input = output
# store the latent representation
z = current_input
encoder.reverse() # going backwards for the decoder
shapes.reverse()
# Build the decoder using the same weights
for layer_i, shape in enumerate(shapes):
W = encoder[layer_i] # using same weights as encoder
b = tf.Variable(tf.zeros([W.get_shape().as_list()[2]])) # but different biases
output = lrelu(tf.add(
tf.nn.conv2d_transpose( # transpose conv is deconv
current_input, W,
tf.pack([tf.shape(x)[0], shape[1], shape[2], shape[3]]), # output shape
strides=[1, 2, 2, 1], padding='SAME'), b))
current_input = output
# now have the reconstruction through the network
y = current_input
# cost function measures pixel-wise difference between output and input
cost = tf.reduce_sum(tf.square(y - x_tensor))
# %%
return {'x': x, 'z': z, 'y': y, 'cost': cost} # output of symbolic operations representing
# input, intermediate, output, and cost
# %%
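# Minimal usage sketch (illustrative only): building the graph for flattened
# 128x128 inputs, which matches the default input_shape above.
#   ae = autoencoder(input_shape=[None, 128 * 128])
#   # ae['x'] is the input placeholder, ae['y'] the reconstruction and ae['cost']
#   # the scalar loss handed to an optimizer.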
def test_mandrill():
"""Test the convolutional autoencder using Mandrill Small image."""
# %%
import tensorflow as tf
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
    # Load Mandrill Small data
mandrill_small = scipy.io.loadmat('mandrill_small.mat')
mandrill_small = mandrill_small['A']
mandrill_small = np.array(mandrill_small)
mandrill_small = np.transpose(mandrill_small, [2,0,1])
mandrill_small = np.reshape(mandrill_small, [3,128*128])
mean_img = np.mean(mandrill_small, axis=0)
ae = autoencoder()
# %%
learning_rate = 0.01
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(ae['cost'])
# We create a session to use the graph
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# Fit all training data
n_epochs = 1
for epoch_i in range(n_epochs):
batch_xs = mandrill_small
train = np.array([img - mean_img for img in batch_xs])
sess.run(optimizer, feed_dict={ae['x']: train})
print(epoch_i, sess.run(ae['cost'], feed_dict={ae['x']: train}))
# Plot example reconstructions
test_xs = mandrill_small
n_examples = 3
test_xs_norm = np.array([img - mean_img for img in test_xs])
recon = sess.run(ae['y'], feed_dict={ae['x']: test_xs_norm})
print(recon.shape)
fig, axs = plt.subplots(2, n_examples, figsize=(10, 2))
for example_i in range(n_examples):
axs[0][example_i].imshow(
np.reshape(test_xs[example_i, :], (128, 128)))
axs[1][example_i].imshow(
np.reshape(
np.reshape(recon[example_i, ...], (128**2,)) + mean_img,
(128, 128)))
fig.show()
plt.draw()
plt.waitforbuttonpress()
if __name__ == '__main__':
test_mandrill()
|
mit
|
wangjohn/shape_of_stones
|
continuous_surface_2D.py
|
1
|
5849
|
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from pylab import *
from numpy import fft
from numpy import linalg
from scipy import integrate
from scipy import interpolate
from numpy.polynomial import chebyshev
import os
from matplotlib import rc
rc("text", usetex=True)
from mpltools import style
style.use('ggplot')
figure(figsize=(5, 4))
ax = subplot()
# -----------------------------------------------------------------------------
# Plotting
# --------
SHOW_PLOT = True
SAVE_PLOT = False
PLOT_NAME = ""
PLOT_LINE = True
PLOT_POINTS = True
filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), "paper/figures", PLOT_NAME) + ".pdf"
# -----------------------------------------------------------------------------
# Simulations
# -----------
T = 0.1
STEPS = int(T / 1E-2)
N = 20
METHOD = ["remove_points", "remove_lowest"][1]
SHAPE = ["circle", "ellipse", "blob"][2]
# -----------------------------------------------------------------------------
# Methods
# -------
def remove_points(shape):
return arctan((shape.curvature()-1)*5) + pi/2
def remove_lowest(shape):
n_fine = 1E2
y_min = amin(real(fft.ifft(change_n(shape.x_hat, n_fine), axis=0))[:,1])
g = 1/(5*absolute(shape.x[:,1] - y_min) + 0.5) - 0.5
g[g<0] = 0
return g
method = {"remove_points": remove_points, "remove_lowest": remove_lowest}[METHOD]
def circle(s):
x = cos(s)
y = sin(s)
return SpectralShape(vstack([x, y]).T)
def ellipse(s):
x = 2*cos(s)
y = sin(s)
return SpectralShape(vstack([x, y]).T)
def blob(s):
x = cos(s)*chebyshev.chebval((s-pi)/pi, [2,0,1])
y = sin(s)*chebyshev.chebval((s-pi)/pi, [2,0,0,0,1])
return SpectralShape(vstack([x, y]).T)
shape_func = {"circle":circle, "ellipse": ellipse, "blob": blob}[SHAPE]
# -----------------------------------------------------------------------------
DIGITS = 53;
TOL = sqrt(0.5**DIGITS)
def ellipse_circumference(a, b):
"""
Compute the circumference of an ellipse with semi-axes a and b.
Require a >= 0 and b >= 0. Relative accuracy is about 0.5^53.
"""
x = max(a, b)
y = min(a, b)
if DIGITS*y < TOL*x:
return 4 * x
s = 0
m = 1
    while x - y > TOL * y:
x = 0.5 * (x + y)
y = sqrt(x * y)
m *= 2
s += m * (x - y)**2
return pi * ((a + b)**2 - s) / (x + y)
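# Sanity check (illustrative): for a circle (a == b) the loop above terminates
# immediately and the result reduces to the familiar circumference, e.g.
#   ellipse_circumference(1.0, 1.0)  ->  2 * pi  (~6.2832)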
def vnorm(x):
return sqrt(sum(x**2, axis=-1))
def vdot(a, b):
return sum(a * b, axis=-1)
def vcross(a, b):
    return cross(a, b)
def change_n(x_hat, n):
n_old = len(x_hat)
if n > n_old:
x_hat = insert(x_hat, int(n_old/2), zeros([n-n_old, 2]), axis=0)
else:
x_hat = take(x_hat, indices=fft_k(n), axis=0)
return (n / n_old) * x_hat
def fft_k(n):
    return hstack([arange(0, n // 2), arange(-(n // 2), 0)])
def fft_theta(n):
return linspace(0, 2*pi, n, endpoint=False)
def spectral_derivative(x_hat, p=1):
n = len(x_hat)
k = fft_k(n)[:,newaxis]
w_hat = x_hat * (1j*k)**p
if p % 2 == 1:
        w_hat[n // 2] = 0
return w_hat
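# Illustrative check (not part of the original script): spectral differentiation of
# a single Fourier mode. With x = cos(theta) sampled on fft_theta(n), one derivative
# should return approximately -sin(theta):
#   theta = fft_theta(64)
#   x_hat = fft.fft(cos(theta)[:, newaxis], axis=0)
#   dx = real(fft.ifft(spectral_derivative(x_hat), axis=0))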
# @apply_to_cols
# def spectral_integral(x_hat, n=1):
# k = arange(len(x_hat))
# w_hat = x_hat * hstack([0, (1/(1j*k[1:])**n)])
# if n % 2 == 1:
# w_hat[-1] = 0
# w_hat[0] = fft_theta(len(x_hat))
# return w_hat
PLOT_N = 100
def plot_spectral(x_hat):
s_fine = fft_theta(len(x_hat))
x_fine = real(fft.ifft(change_n(x_hat, PLOT_N), axis=0))
plot(s_fine, x_fine)
class SpectralShape(object):
def __init__(self, x):
self.x = x
def __len__(self):
return len(self.x_hat)
@property
def x(self):
return real(fft.ifft(self.x_hat, axis=0))
@x.setter
def x(self, value):
self.x_hat = fft.fft(value, axis=0)
def surface_normal(self):
x_dot = real(fft.ifft(spectral_derivative(self.x_hat), axis=0))
x_dot_n = x_dot[:,(1,0)] * [-1,1]
x_dot_n /= vnorm(x_dot_n)[:,newaxis]
return x_dot_n
def surface_tangent(self):
x_dot = real(fft.ifft(spectral_derivative(self.x_hat), axis=0))
x_dot /= vnorm(x_dot)[:,newaxis]
return x_dot
def curvature(self):
x_dot = real(fft.ifft(spectral_derivative(self.x_hat), axis=0))
x_ddot = real(fft.ifft(spectral_derivative(self.x_hat, p=2), axis=0))
kappa = vcross(x_dot, x_ddot) / vnorm(x_dot)**3
return kappa
def dxdt(self, method):
g = method(self)
dx_hatdt = g[:,newaxis] * self.surface_normal()
x_ddot = real(fft.ifft(spectral_derivative(self.x_hat, p=2), axis=0))
a_t = vdot(x_ddot, self.surface_tangent())
a_t *= norm(g) / norm(a_t)
dx_hatdt += a_t[:,newaxis] * self.surface_tangent()
return dx_hatdt
def plot(self, label=None):
x_fine = real(fft.ifft(change_n(self.x_hat, PLOT_N), axis=0))
color = ax._get_lines.color_cycle.next()
if PLOT_LINE:
ax.plot(x_fine[:,0], x_fine[:,1], color, label=label)
if PLOT_POINTS:
ax.plot(self.x[:,0], self.x[:,1], "x", color="{}".format(color))
axis('equal')
# -----------------------------------------------------------------------------
def run_simulation(shape, t_steps, method):
def func(x, t):
shape.x = x.reshape(-1,2)
return shape.dxdt(method).flatten()
x_simulation = integrate.odeint(func, shape.x.flatten(), t_steps)
x_simulation = x_simulation.reshape(len(t_steps), -1, 2)
for i in arange(STEPS, step=int(STEPS/2)):
shape.x = x_simulation[i]
shape.plot(label="t = {:.2f}".format(t_steps[i]))
legend()
savefig(filename)
show()
s = fft_theta(N)
shape = shape_func(s)
t = linspace(0, T, STEPS)
run_simulation(shape, t, method)
|
mit
|
tgquintela/TimeSeriesTools
|
TimeSeriesTools/Similarities/informationth_similarities.py
|
1
|
3596
|
"""
Module which groups all the information theory based measures of distances and
similarity of this package.
TODO
----
Discretization for transformation ts module
"""
from sklearn.metrics import mutual_info_score
import numpy as np
from ..Measures.information_theory_measures import entropy
from ..TS_statistics.ts_statistics import prob_xy, prob_x
def mutualInformation(X, bins):
"""Computation of the mutual information between each pair of variables in
the system.
"""
# Initialization of the values
n = X.shape[1]
MI = np.zeros((n, n))
# Loop over the possible combinations of pairs
for i in range(n):
for j in range(i, n):
aux = mutualInformation_1to1(X[:, i], X[:, j], bins)
# Assignation
MI[i, j] = aux
MI[j, i] = aux
return MI
def mutualInformation_1to1(x, y, bins):
"""Computation of the mutual information between two time-series.
Parameters
----------
x: array_like, shape (N,)
time series to compute difference.
y: array_like, shape (N,)
time series to compute difference.
bins: int or tuple of ints or tuple of arrays
the binning information.
Returns
-------
mi: float
the measure of mutual information between the two time series.
"""
## 1. Discretization
if bins is None:
# Compute contingency matrix
pass
else:
c_xy = np.histogram2d(x, y, bins)[0]
## 2. Compute mutual information from contingency matrix
mi = mutual_info_score(None, None, contingency=c_xy)
return mi
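# Illustrative note (not part of the original module): mutual information of a
# series with itself is maximal, while independent noise gives a value near zero:
#   x = np.random.randn(10000)
#   mutualInformation_1to1(x, x, bins=20)                       # large (~ entropy of binned x)
#   mutualInformation_1to1(x, np.random.randn(10000), bins=20)  # close to 0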
def conditional_entropy(x, y):
"""
TODO
----
Compute correctly
Check with the real matrices
"""
# Discretized signals
## xy and number of regimes
p_xy, _, _ = prob_xy(np.stack([x, y]).T)
p_x, _, _ = prob_x(x)
p_y, _, _ = prob_x(y)
# Conditional probability
p_x_y = np.divide(p_xy, p_y)
# Sum over possible combination of regimes
H_x_y = np.dot(p_xy, np.log(p_x_y))
return H_x_y
def information_GCI_ind(X, bins=None):
"""Baseline method to compute scores based on Information Geometry Causal
Inference, it boils down to computing the difference in entropy between
pairs of variables: scores(i, j) = H(j) - H(i)
Parameters
----------
X: array_like, shape(N,)
the time-series of the system.
bins: array_like, shape (Nintervals+1,)
information of binning or discretizing.
Returns
-------
scores: array_like, shape (Nelements, Nelements)
the matrix of the system.
Reference
---------
    .. [1] P. Daniusis, D. Janzing, J. Mooij, J. Zscheischler, B. Steudel,
    K. Zhang, B. Schoelkopf: Inferring deterministic causal relations.
Proceedings of the 26th Annual Conference on Uncertainty in Artificial
Intelligence (UAI-2010).
http://event.cwi.nl/uai2010/papers/UAI2010_0121.pdf
"""
## Only for discrete signals!!!
# Compute the entropy
H = np.array([entropy(X[:, i]) for i in range(X.shape[1])])
## Compute the scores as entropy differences (vectorized :-))
n = len(H)
scores = np.zeros(shape=(n, n))
## Loop over all the possible pairs of elements
for i in range(n):
for j in range(n):
scores[i, j] = H[j] - H[i]
return scores
# import time
# t0 = time.time()
# mi1 = mutualInformation_1to1(X1, X2, 20)
# t1 = time.time()
# mi2 = calc_MI(X1, X2, 20)
# t2 = time.time()
# mi3 = mutualInformation_1to1(X1, X2, 20)
# print t1-t0
# print t2-t1
# print time.time()-t2
|
mit
|
mfjb/scikit-learn
|
examples/cluster/plot_cluster_comparison.py
|
246
|
4684
|
"""
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
|
bsd-3-clause
|
lyft/incubator-airflow
|
airflow/contrib/plugins/metastore_browser/main.py
|
4
|
6678
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''Plugins metabrowser'''
import json
from datetime import datetime
from typing import List
import pandas as pd
from flask import Blueprint, request
from flask_appbuilder import BaseView, expose
from airflow.plugins_manager import AirflowPlugin
from airflow.providers.apache.hive.hooks.hive import HiveCliHook, HiveMetastoreHook
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.providers.presto.hooks.presto import PrestoHook
from airflow.www.decorators import gzipped
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = [] # type: List[str]
DB_BLACKLIST = ['tmp'] # type: List[str]
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
class MetastoreBrowserView(BaseView):
"""
Creating a Flask-AppBuilder BaseView
"""
default_view = 'index'
@expose('/')
def index(self):
"""
Create default view
"""
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
"""
hook = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = hook.get_pandas_df(sql)
df.db = (
'<a href="/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render_template(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
"""
Create table view
"""
table_name = request.args.get("table")
metastore = HiveMetastoreHook(METASTORE_CONN_ID)
table = metastore.get_table(table_name)
return self.render_template(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
"""
Show tables in database
"""
db = request.args.get("db")
metastore = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(metastore.get_tables(db=db), key=lambda x: x.tableName)
return self.render_template(
"metastore_browser/db.html", tables=tables, db=db)
@gzipped
@expose('/partitions/')
def partitions(self):
"""
Retrieve table partitions
"""
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(table=table, schema=schema)
hook = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = hook.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@gzipped
@expose('/objects/')
def objects(self):
"""
Retrieve objects from TBLS and DBS
"""
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
hook = MySqlHook(METASTORE_MYSQL_CONN_ID)
data = [
{'id': row[0], 'text': row[0]}
for row in hook.get_records(sql)]
return json.dumps(data)
@gzipped
@expose('/data/')
def data(self):
"""
Retrieve data from table
"""
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
hook = PrestoHook(PRESTO_CONN_ID)
df = hook.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
"""
Retrieve table ddl
"""
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
hook = HiveCliHook(HIVE_CLI_CONN_ID)
return hook.run_cli(sql)
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
class MetastoreBrowserPlugin(AirflowPlugin):
"""
Defining the plugin class
"""
name = "metastore_browser"
flask_blueprints = [bp]
appbuilder_views = [{"name": "Hive Metadata Browser",
"category": "Plugins",
"view": MetastoreBrowserView()}]
|
apache-2.0
|
andrew-lundgren/gwpy
|
gwpy/tests/test_cli.py
|
1
|
5106
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `gwpy.cli` module
"""
import os
import tempfile
import importlib
import argparse
from numpy import random
from matplotlib import use
use('agg')
from compat import unittest
from gwpy.timeseries import TimeSeries
from gwpy.plotter import rcParams
__author__ = 'Duncan Macleod <[email protected]>'
TEST_GWF_FILE = os.path.join(os.path.split(__file__)[0], 'data',
'HLV-GW100916-968654552-1.gwf')
_, TEMP_PLOT_FILE = tempfile.mkstemp(prefix='GWPY-UNITTEST_', suffix='.png')
class CliTestMixin(object):
PRODUCT_NAME = 'gwpy.cli.cliproduct.CliProduct'
ACTION = None
TEST_ARGS = ['--chan', 'H1:LDAS-STRAIN', '--start', '968654552',
'--framecache', TEST_GWF_FILE]
def setUp(self):
self.PRODUCT_TYPE = self._import_product()
def _import_product(self):
modname, objname = self.PRODUCT_NAME.rsplit('.', 1)
mod = importlib.import_module(modname)
return getattr(mod, objname)
def test_init(self):
product = self.PRODUCT_TYPE()
def test_get_action(self):
self.assertEqual(self.PRODUCT_TYPE().get_action(), self.ACTION)
def test_init_cli(self):
parser = argparse.ArgumentParser()
product = self.PRODUCT_TYPE()
product.init_cli(parser)
self.assertGreater(len(parser._actions), 1)
return product, parser
def test_get_timeseries(self):
product, parser = self.test_init_cli()
args = parser.parse_args(self.TEST_ARGS + ['--out', TEMP_PLOT_FILE])
try:
try:
product.getTimeSeries(args)
except Exception as e:
if 'No reader' in str(e):
raise RuntimeError(str(e))
else:
raise
except (RuntimeError, ImportError):
product.timeseries = []
product.time_groups = []
product.start_list = []
for s in map(int, args.start):
product.start_list.append(s)
product.time_groups.append([])
for c in args.chan:
product.timeseries.append(
TimeSeries(random.random(1024 * 100), sample_rate=1024,
channel=c, epoch=s))
product.time_groups[-1].append(len(product.timeseries)-1)
return product, args
def test_gen_plot(self):
product, args = self.test_get_timeseries()
product.config_plot(args)
rcParams.update({'text.usetex': False,})
product.gen_plot(args)
return product, args
def test_config_plot(self):
product, parser = self.test_init_cli()
product.config_plot(parser.parse_args(self.TEST_ARGS))
def test_setup_xaxis(self):
product, args = self.test_gen_plot()
product.ax = product.plot.gca()
product.setup_xaxis(args)
def test_setup_yaxis(self):
product, args = self.test_gen_plot()
product.ax = product.plot.gca()
product.setup_yaxis(args)
def test_annotate_save_plot(self):
product, args = self.test_gen_plot()
try:
product.annotate_save_plot(args)
finally:
if os.path.isfile(args.out):
os.remove(args.out)
class CliTimeSeriesTests(CliTestMixin, unittest.TestCase):
PRODUCT_NAME = 'gwpy.cli.timeseries.TimeSeries'
ACTION = 'timeseries'
class CliSpectrumTests(CliTestMixin, unittest.TestCase):
PRODUCT_NAME = 'gwpy.cli.spectrum.Spectrum'
ACTION = 'spectrum'
class CliSpectrogramTests(CliTestMixin, unittest.TestCase):
PRODUCT_NAME = 'gwpy.cli.spectrogram.Spectrogram'
ACTION = 'spectrogram'
class CliCoherenceTests(CliTestMixin, unittest.TestCase):
PRODUCT_NAME = 'gwpy.cli.coherence.Coherence'
ACTION = 'coherence'
TEST_ARGS = CliTestMixin.TEST_ARGS + [
'--chan', 'L1:LDAS-STRAIN', '--secpfft', '0.25',
]
class CliCoherencegramTests(CliTestMixin, unittest.TestCase):
PRODUCT_NAME = 'gwpy.cli.coherencegram.Coherencegram'
ACTION = 'coherencegram'
# XXX coherencegram fails to generate using 1-second of input data
# which is probably fair enough
TEST_ARGS = ['--chan', 'X1:TEST-CHANNEL', '--chan', 'Y1:TEST-CHANNEL',
'--start', '968654552']
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
joshfuchs/ZZCeti_analysis
|
search_table.py
|
1
|
4321
|
'''
Written by JT Fuchs, UNC - Chapel Hill
Read in fitting_solutions.txt file and print requested information
'''
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
'''
Some code here to go find all the text files
'''
'''
fitting_solutions.txt contains: blue filename, red filenames, best model, best Teff, best logg, FWHM, best chi-square, date-time of fit.
'''
#catalog = Table.read('catalog_master_clean.txt',format='ascii')
catalog = Table.read('catalog_master.txt',format='ascii')
task = raw_input('What would you like to see? (star, date, snr, range, filter, duplicates, allstars, K2) ')
if task == 'snr':
for x in range(0,len(catalog['FILENAME'])):
if catalog['SNR'][x] < 80.:
print catalog['FILENAME'][x], catalog['DATE-OBS'][x], catalog['SNR'][x]
if task == 'star':
star_name = raw_input('Name of the star? ')
for x in range(0,len(catalog['FILENAME'])):
if catalog['FILENAME'][x].lower().__contains__(star_name.lower()) == True:
print catalog['FILENAME'][x], catalog['b10teff'][x],catalog['b10logg'][x],catalog['g10teff'][x],catalog['g10logg'][x], catalog['DATE-OBS'][x]
#print 'b10: ', catalog['b10teff'][x], catalog['b10logg'][x]
#print 'alpha: ', catalog['ateff'][x], catalog['alogg'][x]
#print 'beta: ', catalog['bteff'][x], catalog['blogg'][x]
#print 'gamma: ', catalog['gteff'][x], catalog['glogg'][x]
#print 'delta: ', catalog['dteff'][x], catalog['dlogg'][x]
#print 'epsilon: ', catalog['eteff'][x], catalog['elogg'][x]
#print 'H8: ', catalog['H8teff'][x], catalog['H8logg'][x]
#print 'H9: ', catalog['H9teff'][x], catalog['H9logg'][x]
#print 'H10: ', catalog['H10teff'][x], catalog['H10logg'][x]
#print catalog[x]
if task == 'date':
star_name = raw_input('Date? ')
for x in range(0,len(catalog['FILENAME'])):
if catalog['DATE-OBS'][x].__contains__(star_name) == True:
print catalog['FILENAME'][x], catalog['b8teff'][x],catalog['b8logg'][x]
if task == 'range':
trange = raw_input('Would you like to set a temperature range? (yes/no) ')
if trange == 'yes':
tlower = float(raw_input('Lower temperature limit: '))
tupper = float(raw_input('Upper temperature limit: '))
else:
tlower = 1.
tupper = 100000.
grange = raw_input('Would you like to set a log(g) range? (yes/no) ')
if grange == 'yes':
glower = float(raw_input('Lower log(g) limit: '))
gupper = float(raw_input('Upper log(g) limit: '))
else:
glower = 1.
gupper = 20.
for x in np.arange(len(catalog['ateff'])):
if catalog['ateff'][x] <= tupper and catalog['ateff'][x] >= tlower and catalog['alogg'][x] <= gupper and catalog['alogg'][x] >= glower:
print catalog['FILENAME'][x], catalog['DATE-OBS'][x], catalog['b10teff'][x], catalog['b10logg'][x], catalog['g10teff'][x], catalog['g10logg'][x]
if task == 'filter':
which_filter = raw_input('Name of filter (DAV,K2,outburst): ')
yesno = raw_input('Yes or no?(0,1) ')
for x in range(0,len(catalog['FILENAME'])):
#print str(catalog[which_filter][x]), yesno, str(catalog[which_filter][x]) == yesno
if str(catalog[which_filter][x]) == yesno:
print catalog['FILENAME'][x], catalog['DATE-OBS'][x], catalog['Teff'][x], catalog['logg'][x], catalog[which_filter][x]
if task == 'duplicates':
for x in range(0,len(catalog['FILENAME'])):
for y in range(0,len(catalog['FILENAME'])):
if np.abs(catalog['RA'][x]-catalog['RA'][y]) < 0.01 and catalog['DATE-OBS'][x] != catalog['DATE-OBS'][y]:
print catalog['FILENAME'][x], catalog['DATE-OBS'][x], catalog['FILENAME'][y], catalog['DATE-OBS'][y]
if task == 'allstars':
for x in range(0,len(catalog['FILENAME'])):
print catalog['WD'][x], catalog['FILENAME'][x], catalog['DATE-OBS'][x], catalog['b10teff'][x], catalog['b10logg'][x],catalog['g10teff'][x], catalog['g10logg'][x]
if task == 'K2':
for x in range(0,len(catalog['FILENAME'])):
if catalog['K2'][x] == 1:
print catalog['WD'][x], catalog['DATE-OBS'][x], catalog['b10teff'][x], catalog['b10logg'][x], catalog['g10teff'][x], catalog['g10logg'][x]
|
mit
|
PrashntS/scikit-learn
|
examples/ensemble/plot_forest_iris.py
|
335
|
6271
|
"""
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
        clf = clf.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
print( model_details + " with features", pair, "has a score of", scores )
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
        # Build a coarser grid to plot a set of ensemble classifications
        # to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
|
bsd-3-clause
|
kevinhughes27/TensorKart
|
record.py
|
1
|
6928
|
#!/usr/bin/env python
import numpy as np
import os
import shutil
import mss
import matplotlib
matplotlib.use('TkAgg')
from datetime import datetime
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg as FigCanvas
from PIL import ImageTk, Image
import sys
PY3_OR_LATER = sys.version_info[0] >= 3
if PY3_OR_LATER:
# Python 3 specific definitions
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.messagebox as tkMessageBox
else:
# Python 2 specific definitions
import Tkinter as tk
import ttk
import tkMessageBox
from utils import Screenshot, XboxController
IMAGE_SIZE = (320, 240)
IDLE_SAMPLE_RATE = 1500
SAMPLE_RATE = 200
class MainWindow():
""" Main frame of the application
"""
def __init__(self):
self.root = tk.Tk()
self.sct = mss.mss()
self.root.title('Data Acquisition')
self.root.geometry("660x325")
self.root.resizable(False, False)
# Init controller
self.controller = XboxController()
# Create GUI
self.create_main_panel()
# Timer
self.rate = IDLE_SAMPLE_RATE
self.sample_rate = SAMPLE_RATE
self.idle_rate = IDLE_SAMPLE_RATE
self.recording = False
self.t = 0
self.pause_timer = False
self.on_timer()
self.root.mainloop()
def create_main_panel(self):
# Panels
top_half = tk.Frame(self.root)
top_half.pack(side=tk.TOP, expand=True, padx=5, pady=5)
message = tk.Label(self.root, text="(Note: UI updates are disabled while recording)")
message.pack(side=tk.TOP, padx=5)
bottom_half = tk.Frame(self.root)
bottom_half.pack(side=tk.LEFT, padx=5, pady=10)
# Images
self.img_panel = tk.Label(top_half, image=ImageTk.PhotoImage("RGB", size=IMAGE_SIZE)) # Placeholder
self.img_panel.pack(side = tk.LEFT, expand=False, padx=5)
# Joystick
self.init_plot()
self.PlotCanvas = FigCanvas(figure=self.fig, master=top_half)
self.PlotCanvas.get_tk_widget().pack(side=tk.RIGHT, expand=False, padx=5)
# Recording
textframe = tk.Frame(bottom_half, width=332, height=15, padx=5)
textframe.pack(side=tk.LEFT)
textframe.pack_propagate(0)
self.outputDirStrVar = tk.StringVar()
self.txt_outputDir = tk.Entry(textframe, textvariable=self.outputDirStrVar, width=100)
self.txt_outputDir.pack(side=tk.LEFT)
self.outputDirStrVar.set("samples/" + datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
self.record_button = ttk.Button(bottom_half, text="Record", command=self.on_btn_record)
self.record_button.pack(side = tk.LEFT, padx=5)
def init_plot(self):
self.plotMem = 50 # how much data to keep on the plot
self.plotData = [[0] * (5)] * self.plotMem # mem storage for plot
self.fig = Figure(figsize=(4,3), dpi=80) # 320,240
self.axes = self.fig.add_subplot(111)
def on_timer(self):
self.poll()
# stop drawing if recording to avoid slow downs
        if not self.recording:
self.draw()
if not self.pause_timer:
self.root.after(self.rate, self.on_timer)
def poll(self):
self.img = self.take_screenshot()
self.controller_data = self.controller.read()
self.update_plot()
        if self.recording:
self.save_data()
self.t += 1
def take_screenshot(self):
# Get raw pixels from the screen
sct_img = self.sct.grab({ "top": Screenshot.OFFSET_Y,
"left": Screenshot.OFFSET_X,
"width": Screenshot.SRC_W,
"height": Screenshot.SRC_H})
# Create the Image
return Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')
def update_plot(self):
self.plotData.append(self.controller_data) # adds to the end of the list
self.plotData.pop(0) # remove the first item in the list, ie the oldest
def save_data(self):
image_file = self.outputDir+'/'+'img_'+str(self.t)+'.png'
self.img.save(image_file)
# write csv line
self.outfile.write( image_file + ',' + ','.join(map(str, self.controller_data)) + '\n' )
def draw(self):
# Image
self.img.thumbnail(IMAGE_SIZE, Image.ANTIALIAS) # Resize
self.img_panel.img = ImageTk.PhotoImage(self.img)
self.img_panel['image'] = self.img_panel.img
# Joystick
x = np.asarray(self.plotData)
self.axes.clear()
self.axes.plot(range(0,self.plotMem), x[:,0], 'r')
self.axes.plot(range(0,self.plotMem), x[:,1], 'b')
self.axes.plot(range(0,self.plotMem), x[:,2], 'g')
self.axes.plot(range(0,self.plotMem), x[:,3], 'k')
self.axes.plot(range(0,self.plotMem), x[:,4], 'y')
self.PlotCanvas.draw()
def on_btn_record(self):
# pause timer
self.pause_timer = True
if self.recording:
self.recording = False
else:
self.start_recording()
if self.recording:
self.t = 0 # Reset our counter for the new recording
self.record_button["text"] = "Stop"
self.rate = self.sample_rate
# make / open outfile
self.outfile = open(self.outputDir+'/'+'data.csv', 'a')
else:
self.record_button["text"] = "Record"
self.rate = self.idle_rate
self.outfile.close()
# un pause timer
self.pause_timer = False
self.on_timer()
def start_recording(self):
should_record = True
# check that a dir has been specified
if not self.outputDirStrVar.get():
tkMessageBox.showerror(title='Error', message='Specify the Output Directory', parent=self.root)
should_record = False
else: # a directory was specified
self.outputDir = self.outputDirStrVar.get()
# check if path exists - i.e. may be saving over data
if os.path.exists(self.outputDir):
# overwrite the data, yes/no?
if tkMessageBox.askyesno(title='Warning!', message='Output Directory Exists - Overwrite Data?', parent=self.root):
# delete & re-make the dir:
shutil.rmtree(self.outputDir)
os.mkdir(self.outputDir)
# answer was 'no', so do not overwrite the data
else:
should_record = False
self.txt_outputDir.focus_set()
# directory doesn't exist, so make one
else:
os.mkdir(self.outputDir)
self.recording = should_record
if __name__ == '__main__':
app = MainWindow()
|
mit
|
ryandougherty/mwa-capstone
|
MWA_Tools/build/matplotlib/doc/mpl_examples/mplot3d/mixed_subplots_demo.py
|
12
|
1032
|
"""
Demonstrate the mixing of 2d and 3d subplots
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
def f(t):
s1 = np.cos(2*np.pi*t)
e1 = np.exp(-t)
return np.multiply(s1,e1)
################
# First subplot
################
t1 = np.arange(0.0, 5.0, 0.1)
t2 = np.arange(0.0, 5.0, 0.02)
t3 = np.arange(0.0, 2.0, 0.01)
# Twice as tall as it is wide.
fig = plt.figure(figsize=plt.figaspect(2.))
fig.suptitle('A tale of 2 subplots')
ax = fig.add_subplot(2, 1, 1)
l = ax.plot(t1, f(t1), 'bo',
t2, f(t2), 'k--', markerfacecolor='green')
ax.grid(True)
ax.set_ylabel('Damped oscillation')
#################
# Second subplot
#################
ax = fig.add_subplot(2, 1, 2, projection='3d')
X = np.arange(-5, 5, 0.25)
xlen = len(X)
Y = np.arange(-5, 5, 0.25)
ylen = len(Y)
X, Y = np.meshgrid(X, Y)
R = np.sqrt(X**2 + Y**2)
Z = np.sin(R)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
linewidth=0, antialiased=False)
ax.set_zlim3d(-1, 1)
plt.show()
|
gpl-2.0
|
mehdidc/scikit-learn
|
examples/neighbors/plot_kde_1d.py
|
347
|
5100
|
"""
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
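# A minimal sketch (added, not part of the original example): the bandwidth
# parameter controls the smoothness of the estimate, and a simple way to pick
# it is to compare the held-out log-likelihood returned by KernelDensity.score.
# The toy data, split and candidate bandwidths below are illustrative assumptions.
_rng = np.random.RandomState(0)
_toy = np.concatenate((_rng.normal(0, 1, 30), _rng.normal(5, 1, 70)))[:, np.newaxis]
_rng.shuffle(_toy)
_train, _test = _toy[:70], _toy[70:]
for _bw in [0.1, 0.5, 1.0]:
    _kde = KernelDensity(kernel='gaussian', bandwidth=_bw).fit(_train)
    print("bandwidth=%.1f held-out log-likelihood=%.2f" % (_bw, _kde.score(_test)))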
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
|
bsd-3-clause
|
lmcinnes/umap
|
umap/spectral.py
|
1
|
12573
|
from warnings import warn
import numpy as np
import scipy.sparse
import scipy.sparse.csgraph
from sklearn.manifold import SpectralEmbedding
from sklearn.metrics import pairwise_distances
from sklearn.metrics.pairwise import _VALID_METRICS as SKLEARN_PAIRWISE_VALID_METRICS
from umap.distances import pairwise_special_metric, SPECIAL_METRICS
from umap.sparse import SPARSE_SPECIAL_METRICS, sparse_named_distances
def component_layout(
data,
n_components,
component_labels,
dim,
random_state,
metric="euclidean",
metric_kwds={},
):
"""Provide a layout relating the separate connected components. This is done
by taking the centroid of each component and then performing a spectral embedding
of the centroids.
Parameters
----------
data: array of shape (n_samples, n_features)
The source data -- required so we can generate centroids for each
connected component of the graph.
n_components: int
        The number of distinct components to be laid out.
component_labels: array of shape (n_samples)
For each vertex in the graph the label of the component to
which the vertex belongs.
dim: int
The chosen embedding dimension.
metric: string or callable (optional, default 'euclidean')
The metric used to measure distances among the source data points.
metric_kwds: dict (optional, default {})
Keyword arguments to be passed to the metric function.
If metric is 'precomputed', 'linkage' keyword can be used to specify
'average', 'complete', or 'single' linkage. Default is 'average'
Returns
-------
component_embedding: array of shape (n_components, dim)
The ``dim``-dimensional embedding of the ``n_components``-many
connected components.
"""
if data is None:
# We don't have data to work with; just guess
return np.random.random(size=(n_components, dim)) * 10.0
component_centroids = np.empty((n_components, data.shape[1]), dtype=np.float64)
if metric == "precomputed":
# cannot compute centroids from precomputed distances
# instead, compute centroid distances using linkage
distance_matrix = np.zeros((n_components, n_components), dtype=np.float64)
linkage = metric_kwds.get("linkage", "average")
if linkage == "average":
linkage = np.mean
elif linkage == "complete":
linkage = np.max
elif linkage == "single":
linkage = np.min
else:
raise ValueError(
"Unrecognized linkage '%s'. Please choose from "
"'average', 'complete', or 'single'" % linkage
)
for c_i in range(n_components):
dm_i = data[component_labels == c_i]
for c_j in range(c_i + 1, n_components):
dist = linkage(dm_i[:, component_labels == c_j])
distance_matrix[c_i, c_j] = dist
distance_matrix[c_j, c_i] = dist
else:
for label in range(n_components):
component_centroids[label] = data[component_labels == label].mean(axis=0)
if scipy.sparse.isspmatrix(component_centroids):
warn(
"Forcing component centroids to dense; if you are running out of "
"memory then consider increasing n_neighbors."
)
component_centroids = component_centroids.toarray()
if metric in SPECIAL_METRICS:
distance_matrix = pairwise_special_metric(
component_centroids, metric=metric, kwds=metric_kwds,
)
elif metric in SPARSE_SPECIAL_METRICS:
distance_matrix = pairwise_special_metric(
component_centroids,
metric=SPARSE_SPECIAL_METRICS[metric],
kwds=metric_kwds,
)
else:
if callable(metric) and scipy.sparse.isspmatrix(data):
function_to_name_mapping = {
sparse_named_distances[k]: k for k in
set(SKLEARN_PAIRWISE_VALID_METRICS) &
set(sparse_named_distances.keys())
}
try:
metric_name = function_to_name_mapping[metric]
except KeyError:
raise NotImplementedError(
"Multicomponent layout for custom "
"sparse metrics is not implemented at "
"this time."
)
distance_matrix = pairwise_distances(
component_centroids, metric=metric_name, **metric_kwds
)
else:
distance_matrix = pairwise_distances(
component_centroids, metric=metric, **metric_kwds
)
affinity_matrix = np.exp(-(distance_matrix ** 2))
component_embedding = SpectralEmbedding(
n_components=dim, affinity="precomputed", random_state=random_state
).fit_transform(affinity_matrix)
component_embedding /= component_embedding.max()
return component_embedding
def multi_component_layout(
data,
graph,
n_components,
component_labels,
dim,
random_state,
metric="euclidean",
metric_kwds={},
):
"""Specialised layout algorithm for dealing with graphs with many connected components.
    This will first find relative positions for the components by spectrally embedding
    their centroids, then spectrally embed each individual connected component, positioning
    them according to the centroid embeddings. This provides a decent embedding of each
    component while placing the components in good relative positions to one another.
Parameters
----------
data: array of shape (n_samples, n_features)
The source data -- required so we can generate centroids for each
connected component of the graph.
graph: sparse matrix
        The adjacency matrix of the graph to be embedded.
n_components: int
        The number of distinct components to be laid out.
component_labels: array of shape (n_samples)
For each vertex in the graph the label of the component to
which the vertex belongs.
dim: int
The chosen embedding dimension.
metric: string or callable (optional, default 'euclidean')
The metric used to measure distances among the source data points.
metric_kwds: dict (optional, default {})
Keyword arguments to be passed to the metric function.
Returns
-------
embedding: array of shape (n_samples, dim)
The initial embedding of ``graph``.
"""
result = np.empty((graph.shape[0], dim), dtype=np.float32)
if n_components > 2 * dim:
meta_embedding = component_layout(
data,
n_components,
component_labels,
dim,
random_state,
metric=metric,
metric_kwds=metric_kwds,
)
else:
k = int(np.ceil(n_components / 2.0))
base = np.hstack([np.eye(k), np.zeros((k, dim - k))])
meta_embedding = np.vstack([base, -base])[:n_components]
for label in range(n_components):
component_graph = graph.tocsr()[component_labels == label, :].tocsc()
component_graph = component_graph[:, component_labels == label].tocoo()
distances = pairwise_distances([meta_embedding[label]], meta_embedding)
data_range = distances[distances > 0.0].min() / 2.0
if component_graph.shape[0] < 2 * dim or component_graph.shape[0] <= dim + 1:
result[component_labels == label] = (
random_state.uniform(
low=-data_range,
high=data_range,
size=(component_graph.shape[0], dim),
)
+ meta_embedding[label]
)
continue
diag_data = np.asarray(component_graph.sum(axis=0))
# standard Laplacian
# D = scipy.sparse.spdiags(diag_data, 0, graph.shape[0], graph.shape[0])
# L = D - graph
# Normalized Laplacian
I = scipy.sparse.identity(component_graph.shape[0], dtype=np.float64)
D = scipy.sparse.spdiags(
1.0 / np.sqrt(diag_data),
0,
component_graph.shape[0],
component_graph.shape[0],
)
L = I - D * component_graph * D
k = dim + 1
num_lanczos_vectors = max(2 * k + 1, int(np.sqrt(component_graph.shape[0])))
try:
eigenvalues, eigenvectors = scipy.sparse.linalg.eigsh(
L,
k,
which="SM",
ncv=num_lanczos_vectors,
tol=1e-4,
v0=np.ones(L.shape[0]),
maxiter=graph.shape[0] * 5,
)
order = np.argsort(eigenvalues)[1:k]
component_embedding = eigenvectors[:, order]
expansion = data_range / np.max(np.abs(component_embedding))
component_embedding *= expansion
result[component_labels == label] = (
component_embedding + meta_embedding[label]
)
except scipy.sparse.linalg.ArpackError:
warn(
"WARNING: spectral initialisation failed! The eigenvector solver\n"
"failed. This is likely due to too small an eigengap. Consider\n"
"adding some noise or jitter to your data.\n\n"
"Falling back to random initialisation!"
)
result[component_labels == label] = (
random_state.uniform(
low=-data_range,
high=data_range,
size=(component_graph.shape[0], dim),
)
+ meta_embedding[label]
)
return result
def spectral_layout(data, graph, dim, random_state, metric="euclidean", metric_kwds={}):
"""Given a graph compute the spectral embedding of the graph. This is
simply the eigenvectors of the laplacian of the graph. Here we use the
normalized laplacian.
Parameters
----------
data: array of shape (n_samples, n_features)
The source data
graph: sparse matrix
The (weighted) adjacency matrix of the graph as a sparse matrix.
dim: int
The dimension of the space into which to embed.
random_state: numpy RandomState or equivalent
        A state capable of being used as a numpy random state.
Returns
-------
embedding: array of shape (n_vertices, dim)
The spectral embedding of the graph.
"""
n_samples = graph.shape[0]
n_components, labels = scipy.sparse.csgraph.connected_components(graph)
if n_components > 1:
return multi_component_layout(
data,
graph,
n_components,
labels,
dim,
random_state,
metric=metric,
metric_kwds=metric_kwds,
)
diag_data = np.asarray(graph.sum(axis=0))
# standard Laplacian
# D = scipy.sparse.spdiags(diag_data, 0, graph.shape[0], graph.shape[0])
# L = D - graph
# Normalized Laplacian
I = scipy.sparse.identity(graph.shape[0], dtype=np.float64)
D = scipy.sparse.spdiags(
1.0 / np.sqrt(diag_data), 0, graph.shape[0], graph.shape[0]
)
L = I - D * graph * D
k = dim + 1
num_lanczos_vectors = max(2 * k + 1, int(np.sqrt(graph.shape[0])))
try:
if L.shape[0] < 2000000:
eigenvalues, eigenvectors = scipy.sparse.linalg.eigsh(
L,
k,
which="SM",
ncv=num_lanczos_vectors,
tol=1e-4,
v0=np.ones(L.shape[0]),
maxiter=graph.shape[0] * 5,
)
else:
eigenvalues, eigenvectors = scipy.sparse.linalg.lobpcg(
L, random_state.normal(size=(L.shape[0], k)), largest=False, tol=1e-8
)
order = np.argsort(eigenvalues)[1:k]
return eigenvectors[:, order]
except scipy.sparse.linalg.ArpackError:
warn(
"WARNING: spectral initialisation failed! The eigenvector solver\n"
"failed. This is likely due to too small an eigengap. Consider\n"
"adding some noise or jitter to your data.\n\n"
"Falling back to random initialisation!"
)
return random_state.uniform(low=-10.0, high=10.0, size=(graph.shape[0], dim))
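def _spectral_layout_example():
    """A minimal usage sketch (added, not part of the original module): embed a
    small k-nearest-neighbour graph in 2D. The toy data and graph construction
    below are illustrative assumptions, not how UMAP builds its own graph."""
    from sklearn.neighbors import kneighbors_graph
    rng = np.random.RandomState(42)
    data = rng.normal(size=(100, 5))
    graph = kneighbors_graph(data, n_neighbors=10, mode="connectivity")
    # Symmetrise so the graph is a valid undirected adjacency matrix.
    graph = 0.5 * (graph + graph.T)
    return spectral_layout(data, graph, dim=2, random_state=rng)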
|
bsd-3-clause
|
lounick/task_scheduling
|
task_scheduling/utils.py
|
1
|
12298
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, lounick and decabyte
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of task_scheduling nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility Module
This module provides helper functions to be used with task_scheduling problems.
Notes
-----
Documentation of this project follows the NumPy guidelines. These can be seen in action in the example file from the
Napoleon project (`example_numpy.py`_).
.. _example_numpy.py:
http://sphinxcontrib-napoleon.readthedocs.org/en/latest/example_numpy.html
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import time
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
np.set_printoptions(precision=3, suppress=True)
np.random.seed(int(time.time()))
def generate_nodes(n=20, lb=-50, ub=50, dims=2, **kwargs):
"""Generate a random set of `n` nodes of `dims` coordinates using a discrete uniform distribution (lb, ub].
Parameters
----------
n
lb
ub
dims
kwargs
Returns
-------
nodes: ndarray (n, dims)
        List of randomly generated coordinates for problem nodes.
Examples
--------
This function is used to generate a new random set of nodes to be used as input for a scheduling problem.
>>> import numpy as np
>>> np.random.seed(42)
>>> nodes = generate_nodes(n=2, lb=-10, up=10, dims=2)
>>> nodes.flatten()
array([28, 41, 18, 4])
"""
return np.random.randint(lb, ub, (n, dims))
def calculate_distances(nodes):
n = np.atleast_2d(nodes).shape[0]
distances = np.zeros((n, n))
    for k in range(n):
        for p in range(n):
distances[k, p] = np.linalg.norm(nodes[k, :] - nodes[p, :])
return distances
def solve_problem(solver, cost, **kwargs):
"""Generic wrapper for library solvers. Useful for command-line examples or tests.
Parameters
----------
solver
cost
kwargs
Returns
-------
solution: list
list of node indexes that form a solution
    objective: float
value of the objective for the found solution
model: object
model object after running the solver
"""
st = time.time()
solution, objective, model = solver(cost, **kwargs)
dt = time.time() - st
print('Solving problem using [{}] solver\n'.format(solver.__name__))
print('Time to Solve: %.2f secs' % dt)
print('Objective: %.3f' % objective)
print('Solution: %s\n' % solution)
return solution, objective, model
def _set_plot_style():
"""Set the global matplotlib using the project's default style.
Notes
-----
This configuration affects all the tests and the examples included in this package.
"""
if 'bmh' in mpl.style.available:
mpl.style.use('bmh')
mpl.rcParams['figure.figsize'] = _get_figsize(scale=2.0)
# mpl.rcParams[''] = 'tight'
def _get_figsize(scale=1.0):
"""Calculate figure size using aestetic ratio.
Parameters
----------
scale : float
Scaling parameter from basic aspect (6.85:4.23).
Returns
-------
figsize : tuple
A 2-element tuple (w, h) defining the width and height of a figure.
Examples
--------
This function is used to configure matplotlib.
>>> import numpy as np
>>> np.allclose(_get_figsize(1.0), (6.85, 4.23), atol=0.1)
True
"""
fig_width_pt = 495.0 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0) - 1.0) / 2.0
fig_width = fig_width_pt * inches_per_pt * scale # width in inches
fig_height = fig_width * golden_mean # height in inches
return (fig_width, fig_height)
def plot_problem(nodes, solution, objective):
"""Plot the results of a problem in 2D coordinates (X-Y).
Parameters
----------
nodes
solution
objective
Returns
-------
fig: object
figure object
ax: object
axes object
"""
# init plot
_set_plot_style()
fig, ax = plt.subplots()
# plot vertexes
ax.plot(nodes[:, 1], nodes[:, 0], 'o', ms=8, label='nodes')
# ax.plot(nodes[:, 0], nodes[:, 1], 'o', ms=8, label='nodes')
# # add labels
# for n in xrange(len(idx)):
# x, y = nodes[n, 1], nodes[n, 0]
# xt, yt = x - 0.10 * np.abs(x), y - 0.10 * np.abs(y)
#
# ax.annotate('#%d' % n, xy=(x, y), xycoords='data', xytext=(xt,yt))
# normalize solution(s)
indexes = []
if len(solution) > 0:
if type(solution[0]) == list:
indexes.extend(solution)
else:
indexes.append(solution)
# plot solution(s)
for n, idx in enumerate(indexes):
# route plots
route = nodes[idx, :]
# ax.plot(route[:, 0], route[:, 1], '--', alpha=0.8, label='route #{}'.format(n))
ax.plot(route[:, 1], route[:, 0], '--', alpha=0.8, label='route #{}'.format(n))
# add route order
for k, n in enumerate(idx):
# x, y = nodes[n, 0], nodes[n, 1]
x, y = nodes[n, 1], nodes[n, 0]
xt, yt = x + 0.05 * np.abs(x), y + 0.05 * np.abs(y)
xt, yt = x + 0.15, y + 0.15
ax.annotate(str(k), xy=(x, y), xycoords='data', xytext=(xt, yt))
# adjust plot features
ax.axis('equal')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xnew = (xlim[0] - np.abs(xlim[0] * 0.05), xlim[1] + np.abs(xlim[1] * 0.05))
ynew = (ylim[0] - np.abs(ylim[0] * 0.05), ylim[1] + np.abs(ylim[1] * 0.05))
# xnew = (xlim[0] - 2, xlim[1] + 2)
# ynew = (ylim[0] - 1, ylim[1] + 1)
ax.set_xlim(xnew)
ax.set_ylim(ynew)
ax.legend(loc='best')
ax.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
ax.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
ax.grid(which='minor')
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
ax.set_title('Problem Solution')
return fig, ax
def plot_problem_3d(nodes, solution, cost_total):
"""Plot the results of a problem in 3D coordinates.
Parameters
----------
nodes
solution
cost_total
Returns
-------
fig: object
figure object
ax: object
axes object
"""
# init plot
_set_plot_style()
fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))
nodes = np.atleast_2d(nodes)
# plot vertexes
ax.scatter(nodes[:, 1], nodes[:, 0], nodes[:,2], label='nodes')
# # add labels
# for n in xrange(len(idx)):
# x, y = nodes[n, 1], nodes[n, 0]
# xt, yt = x - 0.10 * np.abs(x), y - 0.10 * np.abs(y)
#
# ax.annotate('#%d' % n, xy=(x, y), xycoords='data', xytext=(xt,yt))
# normalize solution(s)
indexes = []
if len(solution) > 0:
if type(solution[0]) == list:
indexes.extend(solution)
else:
indexes.append(solution)
# plot solution(s)
for n, idx in enumerate(indexes):
# route plots
route = nodes[idx, :]
ax.plot(nodes[idx, 1], nodes[idx, 0], route[:, 2], '--', alpha=0.8, label='#{}'.format(n))
# # add route order
# for k, n in enumerate(idx):
# x, y, z = nodes[n, 1], nodes[n, 0], nodes[n, 2]
# xt, yt, zt = x + 0.05 * np.abs(x), y + 0.05 * np.abs(y), z + 0.05 * np.abs(z)
#
# ax.annotate(str(k), xy=(x, y), xycoords='data', xytext=(xt, yt))
# adjust plot features
ax.legend(loc='best')
ax.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
ax.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
ax.zaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
ax.grid(which='minor')
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
ax.set_zlabel('Z (m)')
ax.set_title('Problem Solution')
return fig, ax
def plot_problem_correlation_circles(nodes, solution, objective, max_range=2.0):
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
fig, ax = plot_problem(nodes, solution, objective)
ax.hold(True)
indexes = []
if len(solution) > 0:
if type(solution[0]) == list:
indexes.extend(solution)
else:
indexes.append(solution)
circles = []
for n, idx in enumerate(indexes):
# route plots
route = nodes[idx, :]
for node in route[9:10]:#len(route) - 1]:
# print("Node: {0}".format(node))
circle = mpatches.Circle((node[1],node[0]), radius=max_range, fill=False, edgecolor='blue', linestyle='dotted', lw=1.5, label='Max sensor range')
circles.append(circle)
# collection = PatchCollection(circles, cmap=plt.cm.hsv, alpha=0.3)
# ax.add_collection(collection)
for p in circles:
ax.add_patch(p)
return fig, ax
def plot_problem_correlation_gradient(nodes, solution, objective, max_range=2.0):
def cor(x, y, centre, max_range):
alpha = -np.log(0.01)/max_range
val = np.exp(-alpha*np.sqrt((centre[1]-x)**2 + (centre[0]-y)**2))
# for i in range(len(val)):
# for j in range(len(val[i])):
# if val[i][j] < 0.001:
# val[i][j] = 0
return val
fig, ax = plot_problem(nodes, solution, objective)
ax.hold(True)
(xmin, xmax) = ax.get_xlim()
(ymin, ymax) = ax.get_ylim()
extent = xmin, xmax, ymin, ymax
dx, dy = 0.01, 0.01
indexes = []
if len(solution) > 0:
if type(solution[0]) == list:
indexes.extend(solution)
else:
indexes.append(solution)
for n, idx in enumerate(indexes):
# route plots
route = nodes[idx, :]
ims = []
x = np.arange(xmin, xmax, dx)
y = np.arange(ymax, ymin, -dy)
X, Y = np.meshgrid(x, y)
Z = cor(X, Y, route[1], max_range)
for node in route[1:len(route)-1]:
Z += cor(X, Y, node, max_range)
# plt.cm.jet
        ax.imshow(Z, cmap="Blues", alpha=.9, interpolation='bilinear', extent=extent)
# ax.legend(loc=(0.6,0.9))
# fig.set_dpi(300)
return fig, ax
def generate_grid(x_size, y_size=None, idx_start=[0,0]):
import math
if y_size is None:
y_size = x_size
nodes = []
nodes.append(idx_start)
idx_finish = [idx_start[0]+x_size+1, idx_start[1]]
y_2_floor = int(math.floor(y_size/2.0))
y_2_ceil = int(math.ceil(y_size/2.0))
y_start_ceil = int(math.ceil(idx_start[1]))
for i in range(int(idx_start[0]+1), int(idx_start[0]+x_size+1)):
for j in range(-y_2_floor + y_start_ceil, y_2_ceil + y_start_ceil):
nodes.append([i,j])
nodes.append(idx_finish)
return nodes
|
bsd-3-clause
|
thorwhalen/ut
|
stats/bin_est/shapley.py
|
1
|
20067
|
"""Shapley value analysis"""
__author__ = 'thor'
from numpy import unique, concatenate, sort
import numpy as np
import pandas as pd
from collections import Counter, OrderedDict, defaultdict
import string
import random as rnd
import itertools
from scipy.misc import factorial
from ut.util.uiter import all_subsets_of, powerset
from ut.stats.bin_est.set_est import Shapley as Shapley_1
# from ut.daf.manip import rollin_col
def _coalition_of(iter_of_items):
return tuple(unique(iter_of_items))
def compute_shapley_values_from_coalition_values(coalition_values, normalize=False, verbose=False):
    return compute_shapley_values_from_coalition_values_using_formula(coalition_values, normalize=normalize, verbose=verbose)
def compute_shapley_values_from_coalition_values_using_formula(coalition_values, normalize=False, verbose=False):
"""
Computes the Shapley values of a game specified by coalition values.
See https://en.wikipedia.org/wiki/Shapley_value.
:param coalition_values: The definition of the game (a dict of values of coalitions of players)
:param normalize: True or [False]: Whether to normalize the Shapley values so they sum to 1
:param verbose: True or [False]: Whether to print info while computing the Shapley values
:return: Shapley values of the game specified by coalition_values
"""
players = _universe_set_of_keys_of_dict(coalition_values)
n = len(players)
factorial_n = float(factorial(n))
if verbose:
print(("Normalizing factor: {}".format(factorial_n)))
def _shapley_unnormalized_weight(s):
return factorial(s) * factorial(n - s - 1) # all possible permutations of players before and after
coalition_values = defaultdict(float, coalition_values)
# print coalition_values
shapley_values = dict()
for player in players:
if verbose:
print(("\n-------------------- {} ----------------------".format(player)))
shapley_values[player] = 0.0
for s in map(_coalition_of, powerset(players - {player})):
shapley_values[player] += \
_shapley_unnormalized_weight(len(s)) \
* (coalition_values[_coalition_of(list(set(s).union({player})))] - coalition_values[s])
if verbose:
weight = _shapley_unnormalized_weight(len(s))
s_with_player = coalition_values[_coalition_of(list(set(s).union({player})))]
s_alone = coalition_values[s]
print(("... contributed {} * ({} - {}) = {} \tto {} \t(running sum is {})"
.format(weight,
s_with_player,
s_alone,
weight * (s_with_player - s_alone),
_coalition_of(list(set(s).union({player}))),
shapley_values[player]
)
))
shapley_values[player] /= factorial_n # normalize according to all possible permutations
if normalize:
return _normalize_dict_values(shapley_values)
else:
return shapley_values
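def _example_two_player_shapley():
    """A minimal sketch (added, not part of the original module): in the classic
    two-player game where 'A' and 'B' are worth nothing alone and 1.0 together,
    the formula above splits the value evenly, giving 0.5 to each player."""
    coalition_values = {('A',): 0.0, ('B',): 0.0, ('A', 'B'): 1.0}
    return compute_shapley_values_from_coalition_values_using_formula(coalition_values)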
def _shapley_weight(s, n):
return (factorial(s) * factorial(n - s - 1)) / float(factorial(n))
def compute_shapley_values_from_coalition_values_01(coalition_values, normalize=False, verbose=False):
_complete_missing_coalitions_with_zero_valued_coalitions_in_place(coalition_values)
coalition_values = pd.DataFrame(index=list(coalition_values.keys()),
data=list(coalition_values.values()),
columns=['value'])
se = Shapley_1(coalition_values, success='value')
se.change_type_of_d_index(tuple)
shapley_values = se.compute_shapley_values()
if normalize:
return _normalize_dict_values(shapley_values)
else:
return shapley_values
def compute_shapley_values_from_unit_valued_sequences(sequences, normalize=False):
dm = ShapleyDataModel()
dm.absorb_coalition_obs(sequences)
coalition_values = dm.coalition_values()
return compute_shapley_values_from_coalition_values(coalition_values, normalize=normalize)
# def compute_shapley_values_from_valued_sequences(sequence_and_value_dict, normalize=False):
# dm = ShapleyDataModel()
# dm.absorb_sequence_into_coalition_obs(sequence_and_value_dict)
# coalition_values = dm.coalition_values()
#
# return compute_shapley_values_from_coalition_values(coalition_values, normalize=normalize)
def _normalize_dict_values(d):
value_sum = float(np.sum(list(d.values())))
return {k: v / value_sum for k, v in list(d.items())}
def all_proper_subsets_iterator(superset):
return itertools.chain(
*map(lambda subset_size: itertools.combinations(superset, subset_size),
list(range(1, len(superset)))))
def all_subsets_or_eq_iterator(superset):
return itertools.chain(
*map(lambda subset_size: itertools.combinations(superset, subset_size),
list(range(1, len(superset) + 1))))
def all_superset_iterator(subset, universe_set):
subset = set(subset)
remaining_set = set(universe_set).difference(subset)
return map(lambda x: tuple(subset.union(x)), all_subsets_or_eq_iterator(remaining_set))
def _universe_set_of_keys_of_dict(d):
return set(itertools.chain(*list(d.keys())))
def _complete_missing_coalitions_with_zero_valued_coalitions_in_place(coalition_values, universe_set=None):
"""
complete coalition_contributions with missing combinations (assigning 0.0 to them)
"""
if universe_set is None:
universe_set = set(itertools.chain(*list(coalition_values.keys())))
superset = ShapleyDataModel.coalition_of(list(universe_set))
coalition_values[superset] = coalition_values.get(superset, 0.0)
for subset in map(_coalition_of, all_proper_subsets_iterator(universe_set)):
coalition_values[subset] = coalition_values.get(subset, 0.0)
class ShapleyDataModel(object):
def __init__(self, data=None, data_type=None):
"""
Inputs:
* data: data used to make the coalition values.
* data_type: type of data, either:
- 'coalition_obs': data is the counter (a dict) of coalition_obs directly
- 'coalition_obs_collection': a coalition_obs dict to be added to the existing
                - 'item_collections': an iterator of sequences to absorb to make the coalition_obs
"""
self.coalition_obs = Counter()
self.item_list = []
self._coalition_size_map = None
if data is not None:
# if data_type not given, determine
if data_type is None:
if isinstance(data, Counter):
data_type = 'coalition_obs'
else:
data_type = 'item_collections'
# according to type, process and set data
if data_type == 'coalition_obs':
self.coalition_obs = data
elif data_type == 'coalition_obs_collection':
self.absorb_coalition_obs(data)
elif data_type == 'item_collections':
for d in data:
self.absorb_sequence_into_coalition_obs(d)
@staticmethod
def coalition_of(iter_of_items):
return tuple(unique(iter_of_items))
def absorb_sequence_into_coalition_obs(self, seq):
"""
Updates the self.coalition_obs with the input coalition (a list of items)
"""
self.coalition_obs.update([self.coalition_of(seq)])
return self
def absorb_coalition(self, collection_of_items_of_single_coalition):
"""
Updates the self.coalition_obs with the input coalition (a list of items)
"""
raise DeprecationWarning(
"absorb_coalition is being deprecated. Use absorb_sequence_into_coalition_obs() instead")
self.coalition_obs.update([self.coalition_of(collection_of_items_of_single_coalition)])
return self
def absorb_coalition_obs(self, coalition_obs_data):
"""
Updates the self.coalition_obs with the input dict of {coalition: obs_value}
"""
try:
for coalition, value in coalition_obs_data.items():
self.absorb_coalition_and_value(coalition, value)
except AttributeError:
try:
for seq in coalition_obs_data:
self.absorb_sequence_into_coalition_obs(seq)
except TypeError:
for seq in coalition_obs_data:
self.absorb_coalition_and_value(seq['coalition'], seq['value'])
return self
# coalition_obs_dict = \
# {self.coalition_of(coalition): value for coalition, value in coalition_obs_dict.iteritems()}
# self.coalition_obs.update(coalition_obs_dict)
# self.absorb_coalition_and_value(coalition_obs_dict.keys()[0], coalition_obs_dict.values()[0])
def absorb_coalition_and_value(self, coalition, value):
"""
Updates the self.coalition_obs with the input dict of coalition: obs_value
"""
self.coalition_obs.update({self.coalition_of(coalition): value})
return self
def coalition_values(self, coalition_obs=None, verbose=False):
"""
Computes the coalition_values from coalition_obs (counts or other values).
        To do this, the value of each coalition accumulates the observation counts of all of its subsets.
"""
if coalition_obs is None:
coalition_obs = self.coalition_obs
coalition_contributions = Counter(coalition_obs)
if verbose:
print(coalition_contributions)
universe_set = set(self.mk_item_list(coalition_obs=coalition_obs))
for coalition, count in coalition_obs.items(): # for every coalition
            # ... credit every superset of this coalition (within the item universe) with this coalition's count
superset_counts = {
self.coalition_of(sub_coalition): count
for sub_coalition in all_superset_iterator(coalition, universe_set)
}
# ... update the coalition_values counter with these counts
coalition_contributions.update(superset_counts)
if verbose:
print((" after {} contributions:\n {}".format(coalition, coalition_contributions)))
# # complete coalition_contributions with missing combinations (assigning 0.0 to them)
# _complete_missing_coalitions_with_zero_valued_coalitions_in_place(coalition_contributions)
return coalition_contributions
def coalition_size_map(self):
if not self._coalition_size_map:
self._coalition_size_map = defaultdict(dict)
for coalition, count in self.coalition_obs.items():
self._coalition_size_map[len(coalition)].update({coalition: count})
self._coalition_size_map = OrderedDict(sorted(list(self._coalition_size_map.items()), key=lambda t: t[0]))
return self._coalition_size_map
def mk_poset(self):
d = defaultdict(list)
_coalition_size_map = self.coalition_size_map()
coalition_sizes = sorted(_coalition_size_map.keys())
# TODO: Finish, if necessary
def mk_item_list(self, coalition_obs=None):
if coalition_obs is None:
coalition_obs = self.coalition_obs
item_list = unique(concatenate(list(coalition_obs.keys())))
self.item_list = item_list
return item_list
def _test_shapley_data_model():
list_of_coalitions = [['A', 'B', 'C'], ['A', 'C', 'B'], ['B', 'A', 'C'], ['A', 'A', 'B', 'C'],
['C', 'A'], ['B', 'C'], ['C', 'B'], ['C', 'B'], ['A']]
dm = ShapleyDataModel() # initialize the data model
for coalition in list_of_coalitions: # count the coalitions
dm.absorb_sequence_into_coalition_obs(coalition)
assert dm.coalition_obs == Counter({('A', 'B', 'C'): 4, ('B', 'C'): 3, ('A',): 1, ('A', 'C'): 1}), \
"Unexpected result for dm.coalition_obs"
print("All good in _test_shapley_data_model")
def rand_shapley_values(items=3):
if isinstance(items, int):
items = ','.join(string.ascii_uppercase[:items]).split(',')
if isinstance(items, list):
items = {items[i]: 2**i for i in range(len(items))}
return items
class LinearValuedCoalitionGenerator(object):
def __init__(self, shapley_values=3, normalize=False):
shapley_values = shapley_values or 3
if not isinstance(shapley_values, dict):
shapley_values = rand_shapley_values(items=shapley_values)
self.shapley_values = shapley_values
if normalize:
self.shapley_values = _normalize_dict_values(self.shapley_values)
@staticmethod
def coalition_of(coalition):
return tuple(sort(coalition))
def coalition_value(self, coalition):
return sum([self.shapley_values[item] for item in coalition])
def rand_coalition(self):
return self.coalition_of(rnd.sample(list(self.shapley_values.keys()), rnd.randint(1, len(self.shapley_values))))
def rand_coalition_obs(self):
coalition = self.rand_coalition()
return {coalition: self.coalition_value(coalition)}
def rand_coalition_obs_cum(self, n_draws=None):
        n_draws = n_draws or len(self.shapley_values) // 2
coalition_obs = Counter()
for x in itertools.starmap(self.rand_coalition_obs, itertools.repeat([], n_draws)):
coalition_obs.update(x)
return coalition_obs
def coalition_values(self):
return {self.coalition_of(coalition): self.coalition_value(coalition)
for coalition in all_subsets_of(list(self.shapley_values.keys()), include_empty_set=False)}
# class ShapleyDataModel_old(object):
# def __init__(self, item_seperator=','):
# """
# Inputs:
# * item_seperator will be used to construct string hashes from lists.
# You should choose a character that never shows up in the items, or you'll get problems.
# Other attributes:
# * coalition_obs is a Counter of coalitions
# * coalition_values is also a Counter of coalitions, but it counts not only
# the coalition_obs, but all non-empty subsets of the latter.
# """
# self.coalition_obs = Counter()
# self.coalition_values = None
# self.item_seperator = item_seperator
# self.contribution_df = None
# self.item_list = []
#
# def absorb_coalition(self, coalition):
# """
# Updates the self.coalition_obs with the input coalition (a list of items)
# """
# self.coalition_obs.update([self._list_to_key(coalition)])
#
# def mk_coalition_size_map(self):
#
# d = defaultdict(list)
# for coalition, count in self.coalition_obs.iteritems():
# d[len(self._key_to_list(coalition))].append({coalition: count})
# return d
#
# def mk_coalition_contributions(self, verbose=False):
# """
# Computes the self.coalition_values attribute.
# To do this, we accumulate the counts of all subsets of each unique coalition.
# """
# # init with coalition_obs
# self.coalition_values = Counter(self.coalition_obs)
# if verbose:
# print(self.coalition_values)
# for coalition, count in self.coalition_obs.iteritems(): # for every coalition
# # get list corresponding to the key
# coalition = self._key_to_list(coalition)
# # get all non-empty strict subsets of this list,
# # and assign the mother coalition count
# subset_counts = \
# {self._list_to_key(sub_coalition): count
# for sub_coalition in all_proper_subsets_iterator(coalition)}
# # update the coalition_values counter with these counts
# self.coalition_values.update(subset_counts)
# if verbose:
# print(" after {} contributions:\n {}" \
# .format(coalition, self.coalition_values))
#
# def mk_item_list(self):
# self.item_list = list(unique(self.item_seperator.join(dm.coalition_obs.keys()) \
# .split(self.item_seperator)))
#
# # def all_supersets_iterator(self, subset):
#
# # subset = dm
#
# def mk_contribution_df(self):
# self._fill_counters()
# self.contribution_df = \
# pd.DataFrame(index=self.coalition_values.keys(), columns=dm.item_list)
# for coalition in self.contribution_df.index.values:
# print self._remove_and_remain_dicts(coalition)
# for rr in self._remove_and_remain_dicts(coalition):
# # the contribution of each item is the total contribution
# # minus what the contribution would be without this item
# contribution = \
# self.coalition_values[coalition] \
# - self.coalition_values[rr['remaining']]
# # enter this in the contribution_df
# self.contribution_df.loc[coalition, rr['removed']] = contribution
#
# def _fill_counters(self):
# """
# adds missing item combinations to counters, giving them 0 count
# """
# self.mk_item_list()
# zero_counts = {k: 0 for k in itertools.imap(self._list_to_key,
# all_proper_subsets_iterator(self.item_list))
# }
# self.coalition_obs.update(zero_counts)
# self.coalition_values.update(zero_counts)
#
# def _list_to_key(self, coalition):
# """
# Transforms a list of strings to a comma (or item_seperator) separated string
# of unique items of the input list.
# """
# return self.item_seperator.join(unique(coalition))
#
# def _key_to_list(self, coalition_key):
# """
# Inverse of _list_to_key:
# Returns a list from a character (item_seperator) seperated string of items.
# """
# return coalition_key.split(self.item_seperator)
#
# def _remove_and_remain_dicts(self, superset):
# """
# Returns a list of {removed, remaining} dicts listing all (keys of) superset - item
# sets for every item in superset.
# Returns an empty list if the input superset has only one element.
# Example:
# self._remove_and_remain_dicts('A,B,C')
# returns
# [{'remaining': 'B,C', 'removed': 'A'},
# {'remaining': 'A,B', 'removed': 'C'},
# {'remaining': 'A,C', 'removed': 'B'}]
# """
# superset = set(self._key_to_list(superset))
# if len(superset) > 1:
# return [{'removed': x,
# 'remaining': self._list_to_key(
# list(superset.difference(x)))}
# for x in superset]
# else:
# return list() # return empty list if superset has only one element
#
#
# def _test_shapley_data_model():
# list_of_coalitions = [['A', 'B', 'C'], ['A', 'C', 'B'], ['B', 'A', 'C'], ['A', 'A', 'B', 'C'],
# ['C', 'A'], ['B', 'C'], ['C', 'B'], ['C', 'B'], ['A']]
# dm = ShapleyDataModel_old() # initialize the data model
#
# for coalition in list_of_coalitions: # count the coalitions
# dm.absorb_coalition(coalition)
# assert dm.coalition_obs == Counter({'A,B,C': 4, 'B,C': 3, 'A': 1, 'A,C': 1}), \
# "Unexpected result for dm.coalition_obs"
#
# dm.mk_coalition_contributions()
# assert dm.coalition_values \
# == Counter({'C': 8, 'B': 7, 'B,C': 7, 'A': 6, 'A,C': 5, 'A,B,C': 4, 'A,B': 4}), \
# "Unexpected result for dm.coalition_values"
#
# print("All good in _test_shapley_data_model")
|
mit
|
PythonCharmers/bokeh
|
bokeh/charts/builder/tests/test_step_builder.py
|
33
|
2495
|
""" This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Step
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestStep(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python'] = [2, 3, 7, 5, 26]
xyvalues['pypy'] = [12, 33, 47, 15, 126]
xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
y_python = [ 2., 2., 3., 3., 7., 7., 5., 5., 26.]
y_jython = [ 22., 22.,43., 43., 10., 10., 25., 25., 26.]
y_pypy = [ 12., 12., 33., 33., 47., 47., 15., 15., 126.]
x = [0, 1, 1, 2, 2, 3, 3, 4, 4]
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Step, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['x'], x)
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_jython'], y_jython)
assert_array_equal(builder._data['y_pypy'], y_pypy)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Step, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
|
bsd-3-clause
|
tomlof/scikit-learn
|
sklearn/naive_bayes.py
|
20
|
30830
|
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array, check_consistent_length
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
        shape [n_samples, n_classes].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like, shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB(priors=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB(priors=None)
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, priors=None):
self.priors = priors
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool, optional (default=False)
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
epsilon = 1e-9 * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
n_classes = len(self.classes_)
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
                # Check that the provided priors match the number of classes
if len(priors) != n_classes:
raise ValueError('Number of priors must match number of'
' classes.')
                # Check that the priors sum to one (allowing for round-off)
                if not np.isclose(priors.sum(), 1.0):
raise ValueError('The sum of the priors should be 1.')
                # Check that the priors are non-negative
if (priors < 0).any():
raise ValueError('Priors must be non-negative.')
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_),
dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
        # Update the empirical class priors only if no priors were provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
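# --- Added illustration (not part of the original scikit-learn module) ---
# _update_mean_variance combines running per-class statistics with those of a
# new batch using the Chan/Golub/LeVeque update. The hedged sketch below
# (helper name `_demo_online_mean_variance` is ours) checks that updating in
# two chunks reproduces the mean and variance of the concatenated data.
def _demo_online_mean_variance():
    rng = np.random.RandomState(0)
    X_old, X_new = rng.randn(20, 3), rng.randn(15, 3)
    mu, var = X_old.mean(axis=0), X_old.var(axis=0)
    total_mu, total_var = GaussianNB._update_mean_variance(
        X_old.shape[0], mu, var, X_new)
    X_all = np.vstack([X_old, X_new])
    assert np.allclose(total_mu, X_all.mean(axis=0))
    assert np.allclose(total_var, X_all.var(axis=0))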
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_) -
np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes], optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
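# --- Added illustration (not part of the original scikit-learn module) ---
# fit and partial_fit binarize the labels and, in the binary case, expand the
# single indicator column into two columns (1 - Y, Y) so that per-class counts
# can be accumulated uniformly. A small sketch of that expansion; the helper
# name `_demo_label_expansion` is illustrative only.
def _demo_label_expansion():
    y = np.array([0, 1, 1, 0, 1])
    Y = label_binarize(y, classes=[0, 1])       # shape (5, 1) for binary y
    if Y.shape[1] == 1:
        Y = np.concatenate((1 - Y, Y), axis=1)  # one indicator column per class
    assert Y.shape == (5, 2)
    assert np.array_equal(Y.sum(axis=0), np.bincount(y))
    return Y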
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,), optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T) +
self.class_log_prior_)
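# --- Added illustration (not part of the original scikit-learn module) ---
# MultinomialNB's feature log-probabilities are Laplace/Lidstone-smoothed count
# ratios, and the joint log-likelihood is a dot product with them plus the
# class log prior. The hedged sketch below re-derives both quantities for a
# tiny count matrix; `_demo_multinomial_smoothing` is an illustrative helper
# name, not library API.
def _demo_multinomial_smoothing():
    alpha = 1.0
    feature_count = np.array([[2., 1., 0.],   # counts per (class, feature)
                              [0., 3., 4.]])
    class_count = np.array([3., 7.])
    smoothed_fc = feature_count + alpha
    smoothed_cc = smoothed_fc.sum(axis=1)
    feature_log_prob = np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))
    # each class now carries a proper probability distribution over features
    assert np.allclose(np.exp(feature_log_prob).sum(axis=1), 1.0)
    # joint log-likelihood of one sample of raw feature counts
    x = np.array([1., 0., 2.])
    class_log_prior = np.log(class_count / class_count.sum())
    return np.dot(x, feature_log_prob.T) + class_log_prior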
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional (default=0.0)
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,], optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
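# --- Added illustration (not part of the original scikit-learn module) ---
# The Bernoulli joint log-likelihood above relies on the rearrangement
#   sum_i [x_i log p_ci + (1 - x_i) log(1 - p_ci)]
#     = x . (log p_c - log(1 - p_c)) + sum_i log(1 - p_ci),
# which is what allows a single dot product per class. A small numpy check of
# that identity; `_demo_bernoulli_identity` is an illustrative helper name.
def _demo_bernoulli_identity():
    rng = np.random.RandomState(0)
    feature_log_prob = np.log(rng.uniform(0.05, 0.95, size=(2, 4)))  # log p_ci
    X = rng.randint(2, size=(3, 4)).astype(np.float64)
    neg_prob = np.log(1 - np.exp(feature_log_prob))
    direct = np.dot(X, feature_log_prob.T) + np.dot(1 - X, neg_prob.T)
    rearranged = (np.dot(X, (feature_log_prob - neg_prob).T) +
                  neg_prob.sum(axis=1))
    assert np.allclose(direct, rearranged)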
|
bsd-3-clause
|
onurgu/ner-tagger-tensorflow
|
scripts/inspect_results.py
|
1
|
5722
|
# coding: utf-8
# In[2]:
import pymongo
from IPython.display import display
import glob
import os
import json
def find_runs_on_filesystem(campaign_name, logs_filepath="../experiment-logs/"):
runs = []
for run_dir in glob.glob("/".join([logs_filepath, "[0-9]*"])):
run = {}
try:
with open(os.path.join(run_dir, "info.json"), "r") as f:
run["info"] = json.load(f)
with open(os.path.join(run_dir, "config.json"), "r") as f:
run["config"] = json.load(f)
if run["config"]["experiment_name"] == campaign_name:
runs.append(run)
except IOError as e:
print(e)
return runs
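# --- Added illustration (not part of the original script) ---
# find_runs_on_filesystem expects numerically named run directories, each
# containing an info.json and a config.json with an "experiment_name" key.
# The hedged sketch below builds such a layout in a temporary directory and
# reads it back; the helper name and the field values are invented for the
# demo.
def _demo_find_runs_on_filesystem():
    import tempfile
    tmp = tempfile.mkdtemp()
    run_dir = os.path.join(tmp, "1")
    os.makedirs(run_dir)
    with open(os.path.join(run_dir, "config.json"), "w") as f:
        json.dump({"experiment_name": "demo-campaign"}, f)
    with open(os.path.join(run_dir, "info.json"), "w") as f:
        json.dump({"NER_dev_f_score": {"1": [0.0]}}, f)
    runs = find_runs_on_filesystem("demo-campaign", logs_filepath=tmp)
    assert len(runs) == 1
    return runs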
def report_results_of_a_specific_campaign(campaign_name, db_type):
print(campaign_name)
if db_type == "mongo":
client = pymongo.MongoClient("localhost", 27017)
db = client.joint_ner_and_md
runs = db.runs.find({"config.experiment_name": campaign_name})
else:
runs = find_runs_on_filesystem(campaign_name, logs_filepath=db_type)
configs = []
for run_idx, run in enumerate(runs):
dict_to_report = dict(run["config"])
        initial_keys = list(dict_to_report.keys())
        print(initial_keys)
result_designation_labels = ["MORPH", "NER", "YURET"]
dict_to_report["epochs"] = max([len(run["info"][label].keys())
for label in ["NER_dev_f_score", "MORPH_dev_f_score"]])
for result_designation_label in result_designation_labels:
print "result_designation_label: ", result_designation_label
if result_designation_label == "YURET":
best_performances = run["info"][result_designation_label + "_test_f_score"]
else:
best_performances = run["info"][result_designation_label + "_dev_f_score"]
            print(best_performances)
best_dev_result_for_this_run = 0
best_test_result_for_this_run = 0
epoch_id_of_the_best_dev_result = -1
# display(run["config"])
for epoch in sorted([int(k) for k in best_performances.keys()]):
# if result_designation_label != "NER":
# corrected_epoch = epoch + 1
epoch_max = max(best_performances[str(epoch)])
if epoch_max > best_dev_result_for_this_run:
epoch_id_of_the_best_dev_result = epoch
best_dev_result_for_this_run = epoch_max
best_test_result_for_this_run = \
max(run["info"][result_designation_label + "_test_f_score"][str(epoch)])
# print "run_idx: %d, epoch: %d, epoch_best_performance: %.2lf, best_for_this_run: %.2lf" % (run_idx, epoch, epoch_max, best_for_this_run)
dict_to_report[result_designation_label + "_best_dev"] = best_dev_result_for_this_run
dict_to_report[result_designation_label + "_best_test"] = best_test_result_for_this_run
for x in result_designation_labels:
# if x != result_designation_label:
print "x: ", x
print "epoch_id_of_the_best_dev_result: ", epoch_id_of_the_best_dev_result
dict_to_report[result_designation_label + "_to_" + x + "_test"] = \
max(run["info"][x + "_test_f_score"][str(epoch_id_of_the_best_dev_result)]) \
if str(epoch_id_of_the_best_dev_result) in run["info"][x + "_test_f_score"].keys() else -1
                print(dict_to_report[result_designation_label + "_to_" + x + "_test"])
configs.append({key: dict_to_report[key] for key in [x for x in ["host",
"integration_mode",
"active_models",
"train_with_yuret",
"use_golden_morpho_analysis_in_word_representation",
"multilayer",
"shortcut_connections",
"epochs"] if x in dict_to_report] +
[x for x in dict_to_report.keys() if x not in initial_keys]})
import pandas
df = pandas.DataFrame.from_dict(configs)
    print(configs)
cols = df.columns.tolist()
# display(df[["host"] +
# [x for x in dict_to_report.keys() if x not in initial_keys]])
display(df)
df_groupedby_hyperparameters = df.groupby(["integration_mode",
"active_models",
"train_with_yuret",
"use_golden_morpho_analysis_in_word_representation",
"multilayer",
"shortcut_connections"])
return df, df_groupedby_hyperparameters.NER_best_test.mean()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--campaign_name", default="section1-all-20171013-01")
parser.add_argument("--db_type", default="mongo")
args = parser.parse_args()
df, df_groupedby_hyperparameter_NER_best_test_mean = report_results_of_a_specific_campaign(args.campaign_name, args.db_type)
df.to_csv("./scripts/results-%s.csv" % args.campaign_name)
df_groupedby_hyperparameter_NER_best_test_mean.to_csv("./scripts/results-NER_best_test_mean-%s.csv" % args.campaign_name)
|
mit
|
ARudiuk/mne-python
|
tutorials/plot_stats_cluster_methods.py
|
3
|
8605
|
# doc:slow-example
"""
.. _tut_stats_cluster_methods:
======================================================
Permutation t-test on toy data with spatial clustering
======================================================
Following the illustrative example of Ridgway et al. 2012,
this demonstrates some basic ideas behind both the "hat"
variance adjustment method, as well as threshold-free
cluster enhancement (TFCE) methods in mne-python.
This toy dataset consists of a 40 x 40 square with a "signal"
present in the center (at pixel [20, 20]) with white noise
added and a 5-pixel-SD normal smoothing kernel applied.
For more information, see:
Ridgway et al. 2012, "The problem of low variance voxels in
statistical parametric mapping; a new hat avoids a 'haircut'",
NeuroImage. 2012 Feb 1;59(3):2131-41.
Smith and Nichols 2009, "Threshold-free cluster enhancement:
addressing problems of smoothing, threshold dependence, and
localisation in cluster inference", NeuroImage 44 (2009) 83-98.
The top row plots the T statistic over space, peaking toward the
center. Note that it has peaky edges. Second, with the "hat" variance
correction/regularization, the peak becomes correctly centered. Third,
the TFCE approach also corrects for these edge artifacts. Fourth,
the two methods combined provide a tighter estimate, for better or
worse.
Now considering multiple-comparisons corrected statistics on these
variables, note that a non-cluster test (e.g., FDR or Bonferroni) would
mis-localize the peak due to sharpness in the T statistic driven by
low-variance pixels toward the edge of the plateau. Standard clustering
(first plot in the second row) identifies the correct region, but the
whole area must be declared significant, so no peak analysis can be done.
Also, the peak is broad. In this method, all significances are
family-wise error rate (FWER) corrected, and the method is
non-parametric so assumptions of Gaussian data distributions (which do
actually hold for this example) don't need to be satisfied. Adding the
"hat" technique tightens the estimate of significant activity (second
plot). The TFCE approach (third plot) allows analyzing each significant
point independently, but still has a broadened estimate. Note that
this is also FWER corrected. Finally, combining the TFCE and "hat"
methods tightens the area declared significant (again FWER corrected),
and allows for evaluation of each point independently instead of as
a single, broad cluster.
Note that this example does quite a bit of processing, so even on a
fast machine it can take a few minutes to complete.
"""
# Authors: Eric Larson <[email protected]>
# License: BSD (3-clause)
import numpy as np
from scipy import stats
from functools import partial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa; this changes hidden mpl vars
from mne.stats import (spatio_temporal_cluster_1samp_test,
bonferroni_correction, ttest_1samp_no_p)
try:
from sklearn.feature_extraction.image import grid_to_graph
except ImportError:
from scikits.learn.feature_extraction.image import grid_to_graph
print(__doc__)
###############################################################################
# Set parameters
# --------------
width = 40
n_subjects = 10
signal_mean = 100
signal_sd = 100
noise_sd = 0.01
gaussian_sd = 5
sigma = 1e-3 # sigma for the "hat" method
threshold = -stats.distributions.t.ppf(0.05, n_subjects - 1)
threshold_tfce = dict(start=0, step=0.2)
n_permutations = 1024 # number of clustering permutations (1024 for exact)
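# Added sanity check (not in the original tutorial): the fixed threshold above
# is the one-tailed critical t value for p < 0.05 with n_subjects - 1 degrees
# of freedom, so its survival function value should be ~0.05.
assert np.isclose(stats.distributions.t.sf(threshold, n_subjects - 1), 0.05)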
###############################################################################
# Construct simulated data
# ------------------------
#
# Make the connectivity matrix just next-neighbor spatially
n_src = width * width
connectivity = grid_to_graph(width, width)
# For each "subject", make a smoothed noisy signal with a centered peak
rng = np.random.RandomState(42)
X = noise_sd * rng.randn(n_subjects, width, width)
# Add a signal at the dead center
X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
# Spatially smooth with a 2D Gaussian kernel
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
for si in range(X.shape[0]):
for ri in range(X.shape[1]):
X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
for ci in range(X.shape[2]):
X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
###############################################################################
# Do some statistics
# ------------------
#
# .. note::
# X needs to be a multi-dimensional array of shape
#     samples (subjects) x time x space, so we reshape it accordingly:
X = X.reshape((n_subjects, 1, n_src))
###############################################################################
# Now let's do some clustering using the standard method.
#
# .. note::
# Not specifying a connectivity matrix implies grid-like connectivity,
# which we want here:
T_obs, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
connectivity=connectivity,
tail=1, n_permutations=n_permutations)
# Let's put the cluster data in a readable format
ps = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
ps[cl[1]] = -np.log10(p)
ps = ps.reshape((width, width))
T_obs = T_obs.reshape((width, width))
# To do a Bonferroni correction on these data is simple:
p = stats.distributions.t.sf(T_obs, n_subjects - 1)
p_bon = -np.log10(bonferroni_correction(p)[1])
# Now let's do some clustering using the standard method with "hat":
stat_fun = partial(ttest_1samp_no_p, sigma=sigma)
T_obs_hat, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
connectivity=connectivity,
tail=1, n_permutations=n_permutations,
stat_fun=stat_fun)
# Let's put the cluster data in a readable format
ps_hat = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
ps_hat[cl[1]] = -np.log10(p)
ps_hat = ps_hat.reshape((width, width))
T_obs_hat = T_obs_hat.reshape((width, width))
# Now the threshold-free cluster enhancement method (TFCE):
T_obs_tfce, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
connectivity=connectivity,
tail=1, n_permutations=n_permutations)
T_obs_tfce = T_obs_tfce.reshape((width, width))
ps_tfce = -np.log10(p_values.reshape((width, width)))
# Now the TFCE with "hat" variance correction:
T_obs_tfce_hat, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
connectivity=connectivity,
tail=1, n_permutations=n_permutations,
stat_fun=stat_fun)
T_obs_tfce_hat = T_obs_tfce_hat.reshape((width, width))
ps_tfce_hat = -np.log10(p_values.reshape((width, width)))
###############################################################################
# Visualize results
# -----------------
fig = plt.figure(facecolor='w')
x, y = np.mgrid[0:width, 0:width]
kwargs = dict(rstride=1, cstride=1, linewidth=0, cmap='Greens')
Ts = [T_obs, T_obs_hat, T_obs_tfce, T_obs_tfce_hat]
titles = ['T statistic', 'T with "hat"', 'TFCE statistic', 'TFCE w/"hat" stat']
for ii, (t, title) in enumerate(zip(Ts, titles)):
ax = fig.add_subplot(2, 4, ii + 1, projection='3d')
ax.plot_surface(x, y, t, **kwargs)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(title)
p_lims = [1.3, -np.log10(1.0 / n_permutations)]
pvals = [ps, ps_hat, ps_tfce, ps_tfce_hat]
titles = ['Standard clustering', 'Clust. w/"hat"',
'Clust. w/TFCE', 'Clust. w/TFCE+"hat"']
axs = []
for ii, (p, title) in enumerate(zip(pvals, titles)):
ax = fig.add_subplot(2, 4, 5 + ii)
plt.imshow(p, cmap='Purples', vmin=p_lims[0], vmax=p_lims[1])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(title)
axs.append(ax)
plt.tight_layout()
for ax in axs:
cbar = plt.colorbar(ax=ax, shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025)
cbar.set_label('-log10(p)')
cbar.set_ticks(p_lims)
cbar.set_ticklabels(['%0.1f' % p for p in p_lims])
plt.show()
|
bsd-3-clause
|
nouiz/pydy
|
examples/rattleback/cpp/rattleback_plot.py
|
8
|
2854
|
import numpy as np
import matplotlib.pyplot as plt
#import os
# Define a data type for the simdata structure
simdata = np.dtype([('t', np.float64),
('q0', np.float64),
('q1', np.float64),
('q2', np.float64),
('q3', np.float64),
('q4', np.float64),
('u0', np.float64),
('u1', np.float64),
('u2', np.float64),
('qd0', np.float64),
('qd1', np.float64),
('qd2', np.float64),
('qd3', np.float64),
('qd4', np.float64),
('ud0', np.float64),
('ud1', np.float64),
('ud2', np.float64),
('Rx', np.float64),
('Ry', np.float64),
('Rz', np.float64),
('ke', np.float64),
('pe', np.float64),
('te', np.float64),
('delta', np.float64)])
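# --- Added illustration (not part of the original script) ---
# The plotting below expects a binary 'simulation.dat' written by the C++
# simulation as packed `simdata` records. If that output is not available, a
# zero-filled placeholder can be generated with the hedged helper below (the
# helper name and its contents are ours, purely for exercising the plots).
def _write_placeholder_simulation(path='simulation.dat', n=100):
    placeholder = np.zeros(n, dtype=simdata)       # one record per time step
    placeholder['t'] = np.linspace(0.0, 1.0, n)
    placeholder.tofile(path)
# _write_placeholder_simulation()  # uncomment to create a dummy data file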
#os.system('make simulation.dat')
data = np.fromfile('simulation.dat', dtype=simdata) # read the data
plt.figure()
plt.subplot(211)
plt.plot(data['t'], data['delta']*180./np.pi)
plt.ylabel(r'$\delta$')
plt.subplot(212)
plt.plot(data['t'], data['q0']*180./np.pi)
plt.ylabel(r'$\gamma$')
plt.xlabel('time, seconds')
plt.figure()
#plt.subplot(211)
plt.ylabel(r'degrees, degrees/second')
plt.plot(data['t'], data['qd0']*180./np.pi, label=r'$\dot{\psi}$')
plt.plot(data['t'], data['q0']*180./np.pi, label=r'$\psi$')
plt.legend(loc=0)
plt.xlabel('time, seconds')
plt.figure()
plt.plot(data['t'], data['u0'], label=r'$\omega_x$')
plt.plot(data['t'], data['u1'], label=r'$\omega_y$')
plt.plot(data['t'], data['u2'], label=r'$\omega_z$')
plt.title('Body fixed angular velocity')
plt.legend(loc=0)
plt.figure()
plt.suptitle('Mechanical energy')
plt.subplot(311)
plt.plot(data['t'], data['ke'], label='ke')
plt.legend(loc=0)
plt.subplot(312)
plt.plot(data['t'], data['pe'], label='pe')
plt.legend(loc=0)
plt.subplot(313)
plt.plot(data['t'], data['te'] - data['te'][0], label=r'$\Delta E$')
plt.legend(loc=0)
plt.figure()
plt.subplot(211)
plt.plot(data['t'], data['Rx'], label=r'$\mathbf{F}_x$')
plt.legend(loc=0)
plt.plot(data['t'], data['Ry'], label=r'$\mathbf{F}_y$')
plt.legend(loc=0)
plt.subplot(212)
plt.plot(data['t'], data['Rz'], label=r'$\mathbf{F}_z$')
plt.legend(loc=0)
plt.title('Contact point reaction force')
plt.figure()
plt.plot(data['t'], data['ud0'], label=r'$\dot{\omega}_x$')
plt.plot(data['t'], data['ud1'], label=r'$\dot{\omega}_y$')
plt.plot(data['t'], data['ud2'], label=r'$\dot{\omega}_z$')
plt.title('Body fixed angular acceleration')
plt.legend(loc=0)
plt.figure()
plt.plot(data['q3'],data['q4'])
plt.title('Contact point location')
plt.show()
|
bsd-3-clause
|
tongwang01/tensorflow
|
tensorflow/contrib/factorization/python/ops/gmm.py
|
16
|
7461
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering.
This is built on top of the skflow API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators._sklearn import TransformerMixin
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
class GMM(estimator.Estimator, TransformerMixin):
"""GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
batch_size=128,
steps=10,
continue_training=False,
config=None,
verbose=1):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
batch_size: See Estimator
steps: See Estimator
continue_training: See Estimator
config: See Estimator
verbose: See Estimator
"""
super(GMM, self).__init__(
model_dir=model_dir,
config=config)
self.batch_size = batch_size
self.steps = steps
self.continue_training = continue_training
self.verbose = verbose
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
def fit(self, x, y=None, monitors=None, logdir=None, steps=None):
"""Trains a GMM clustering on x.
Note: See Estimator for logic for continuous training and graph
construction across multiple calls to fit.
Args:
x: training input matrix of shape [n_samples, n_features].
y: labels. Should be None.
monitors: List of `Monitor` objects to print training progress and
invoke early stopping.
logdir: the directory to save the log file that can be used for optional
visualization.
steps: number of training steps. If not None, overrides the value passed
in constructor.
Returns:
Returns self.
"""
if logdir is not None:
self._model_dir = logdir
self._data_feeder = data_feeder.setup_train_data_feeder(
x, None, self._num_clusters, self.batch_size)
self._train_model(input_fn=self._data_feeder.input_builder,
feed_fn=self._data_feeder.get_feed_dict_fn(),
steps=steps or self.steps,
monitors=monitors,
init_feed_fn=self._data_feeder.get_feed_dict_fn())
return self
def predict(self, x, batch_size=None):
"""Predict cluster id for each element in x.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, containing cluster ids.
"""
return np.array([
prediction[GMM.ASSIGNMENTS] for prediction in
super(GMM, self).predict(x=x, batch_size=batch_size, as_iterable=True)])
def score(self, x, batch_size=None):
"""Predict total sum of distances to nearest clusters.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Total score.
"""
return np.sum(self.evaluate(x=x, batch_size=batch_size)[GMM.SCORES])
def transform(self, x, batch_size=None):
"""Transforms each element in x to distances to cluster centers.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
return np.array([
prediction[GMM.ALL_SCORES] for prediction in
super(GMM, self).predict(x=x, batch_size=batch_size, as_iterable=True)])
def clusters(self):
"""Returns cluster centers."""
clusters = tf.contrib.framework.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return tf.contrib.framework.load_variable(
self.model_dir,
gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat(1, [features[k] for k in sorted(features.keys())])
return features
def _get_train_ops(self, features, _):
(_,
_,
losses,
training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
incr_step = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
loss = tf.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
return training_op, loss
def _get_predict_ops(self, features):
(all_scores,
model_predictions,
_,
_) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
return {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
def _get_eval_ops(self, features, _, unused_metrics):
(_,
_,
losses,
_) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
return {
GMM.SCORES: tf.reduce_sum(losses),
}
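# --- Added usage sketch (not part of the original module) ---
# A minimal, hedged example of driving this estimator end to end, assuming the
# TensorFlow contrib stack of this era is importable. The data and the
# hyperparameters below are invented for illustration.
def _example_gmm_usage():
    x = np.random.randn(500, 2).astype(np.float32)
    gmm = GMM(num_clusters=3, steps=20, batch_size=100)
    gmm.fit(x)
    assignments = gmm.predict(x)   # cluster id per row of x
    centers = gmm.clusters()       # shape (num_clusters, n_features)
    return assignments, centers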
|
apache-2.0
|
chrisburr/scikit-learn
|
sklearn/decomposition/nmf.py
|
8
|
46556
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Mathieu Blondel <[email protected]>
# Tom Dupre la Tour
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (Projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import division, print_function
from math import sqrt
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted, check_non_negative
from ..utils import deprecated
from ..exceptions import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
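# --- Added illustration (not part of the original module) ---
# Hoyer's sparseness is 1 for a one-hot vector and 0 for a constant vector; a
# quick check of those two extremes using the helper above
# (`_demo_sparseness` is an illustrative name).
def _demo_sparseness():
    assert np.isclose(_sparseness(np.array([0., 0., 1., 0.])), 1.0)
    assert np.isclose(_sparseness(np.ones(4)), 0.0)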
def _check_init(A, shape, whom):
A = check_array(A)
if np.shape(A) != shape:
raise ValueError('Array with wrong shape passed to %s. Expected %s, '
'but got %s ' % (whom, shape, np.shape(A)))
check_non_negative(A, whom)
if np.max(A) == 0:
raise ValueError('Array passed to %s is full of zeros.' % whom)
def _safe_compute_error(X, W, H):
"""Frobenius norm between X and WH, safe for sparse array"""
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(norm_X + norm_WH - 2. * cross_prod)
return error
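# --- Added illustration (not part of the original module) ---
# For sparse X the error above is expanded as
#   ||X - WH||_F^2 = ||X||_F^2 + ||WH||_F^2 - 2 <X, WH>,
# which avoids densifying X. A hedged check that the sparse path matches the
# dense Frobenius norm; `_demo_safe_compute_error` is an illustrative name.
def _demo_safe_compute_error():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 5))
    W = np.abs(rng.randn(6, 3))
    H = np.abs(rng.randn(3, 5))
    dense_error = _safe_compute_error(X, W, H)
    sparse_error = _safe_compute_error(sp.csr_matrix(X), W, H)
    assert np.isclose(dense_error, sparse_error)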
def _check_string_param(sparseness, solver):
allowed_sparseness = (None, 'data', 'components')
if sparseness not in allowed_sparseness:
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, allowed_sparseness))
allowed_solver = ('pg', 'cd')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
eps : float
        Truncate all values less than this in output to zero.
random_state : int seed, RandomState instance, or None (default)
Random number generator seed control, used in 'nndsvdar' and
'random' modes.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if init is None:
if n_components < n_features:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features)
W = avg * rng.randn(n_samples, n_components)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
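# --- Added illustration (not part of the original module) ---
# A hedged sketch of calling the initializer directly: NNDSVD should return
# non-negative factors of the requested shapes; `_demo_initialize_nmf` is an
# illustrative name.
def _demo_initialize_nmf():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(10, 6))
    W, H = _initialize_nmf(X, n_components=3, init='nndsvd', random_state=0)
    assert W.shape == (10, 3) and H.shape == (3, 6)
    assert np.all(W >= 0) and np.all(H >= 0)
    return W, H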
def _nls_subproblem(V, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
V : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
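# --- Added illustration (not part of the original module) ---
# A hedged sketch of the projected-gradient NNLS solver on a tiny problem:
# starting from a feasible guess, the returned H stays non-negative and the
# residual does not increase; `_demo_nls_subproblem` is an illustrative name.
def _demo_nls_subproblem():
    rng = np.random.RandomState(0)
    W = np.abs(rng.randn(8, 3))
    H_true = np.abs(rng.randn(3, 4))
    V = np.dot(W, H_true)
    H0 = np.ones((3, 4))
    H, grad, n_iter = _nls_subproblem(V, W, H0.copy(), tol=1e-4, max_iter=200)
    assert np.all(H >= 0)
    assert norm(V - np.dot(W, H)) <= norm(V - np.dot(W, H0))
    return H, n_iter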
def _update_projected_gradient_w(X, W, H, tolW, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = H.shape[0]
if sparseness is None:
Wt, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(beta) * np.ones((1,
n_components_))]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(eta) * np.eye(n_components_)]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return Wt.T, gradW.T, iterW
def _update_projected_gradient_h(X, W, H, tolH, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = W.shape[1]
if sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((n_components_, n_features))]),
safe_vstack([W,
np.sqrt(eta) * np.eye(n_components_)]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(beta)
* np.ones((1, n_components_))]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return H, gradH, iterH
def _fit_projected_gradient(X, W, H, tol, max_iter,
nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Compute Non-negative Matrix Factorization (NMF) with Projected Gradient
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with Sparseness Constraints.
Journal of Machine Learning Research 2004.
"""
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition
# as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
W, gradW, iterW = _update_projected_gradient_w(X, W, H, tolW,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _update_projected_gradient_h(X, W, H, tolH,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
W, _, _ = _update_projected_gradient_w(X, W, H, tol, nls_max_iter,
alpha, l1_ratio, sparseness,
beta, eta)
return W, H, n_iter
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = fast_dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
# L2 regularization corresponds to increase of the diagonal of HHt
if l2_reg != 0.:
# adds l2_reg only on the diagonal
HHt.flat[::n_components + 1] += l2_reg
# L1 regularization corresponds to decrease of each element of XHt
if l1_reg != 0.:
XHt -= l1_reg
if shuffle:
permutation = random_state.permutation(n_components)
else:
permutation = np.arange(n_components)
# The following seems to be required on 64-bit Windows w/ Python 3.5.
permutation = np.asarray(permutation, dtype=np.intp)
return _update_cdnmf_fast(W, HHt, XHt, permutation)
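# --- Added illustration (not part of the original module) ---
# The in-place update `HHt.flat[::n_components + 1] += l2_reg` above touches
# exactly the diagonal of the (C-contiguous) Gram matrix, i.e. it is the same
# as adding l2_reg * I. A small numpy check of that striding trick;
# `_demo_l2_diagonal_trick` is an illustrative name.
def _demo_l2_diagonal_trick():
    rng = np.random.RandomState(0)
    n_components = 4
    HHt = rng.randn(n_components, n_components)
    expected = HHt + 0.5 * np.eye(n_components)
    HHt.flat[::n_components + 1] += 0.5
    assert np.allclose(HHt, expected)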
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, alpha=0.001,
l1_ratio=0., regularization=None, update_H=True,
verbose=0, shuffle=False, random_state=None):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
# L1 and L2 regularization
l1_H, l2_H, l1_W, l2_W = 0, 0, 0, 0
if regularization in ('both', 'components'):
alpha = float(alpha)
l1_H = l1_ratio * alpha
l2_H = (1. - l1_ratio) * alpha
if regularization in ('both', 'transformation'):
alpha = float(alpha)
l1_W = l1_ratio * alpha
l2_W = (1. - l1_ratio) * alpha
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, l1_W, l2_W,
shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, l1_H, l2_H,
shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
def non_negative_factorization(X, W=None, H=None, n_components=None,
init='random', update_H=True, solver='cd',
tol=1e-4, max_iter=200, alpha=0., l1_ratio=0.,
regularization=None, random_state=None,
verbose=0, shuffle=False, nls_max_iter=2000,
sparseness=None, beta=1, eta=0.1):
"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
If update_H=False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set all features
are kept.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a (deprecated) Projected Gradient solver.
'cd' is a Coordinate Descent solver.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
eta : double, default: 0.1
Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
_check_string_param(sparseness, solver)
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
if not isinstance(n_components, six.integer_types) or n_components <= 0:
raise ValueError("Number of components must be positive;"
" got (n_components=%r)" % n_components)
if not isinstance(max_iter, numbers.Number) or max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % max_iter)
if not isinstance(tol, numbers.Number) or tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % tol)
# check W and H, or initialize them
if init == 'custom':
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init,
random_state=random_state)
if solver == 'pg':
warnings.warn("'pg' solver will be removed in release 0.19."
" Use 'cd' solver instead.", DeprecationWarning)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(X, W, H, tol,
max_iter,
nls_max_iter,
alpha, l1_ratio,
sparseness,
beta, eta)
else: # transform
W, H, n_iter = _update_projected_gradient_w(X, W, H,
tol, nls_max_iter,
alpha, l1_ratio,
sparseness, beta,
eta)
elif solver == 'cd':
W, H, n_iter = _fit_coordinate_descent(X, W, H, tol,
max_iter,
alpha, l1_ratio,
regularization,
update_H=update_H,
verbose=verbose,
shuffle=shuffle,
random_state=random_state)
else:
raise ValueError("Invalid solver parameter '%s'." % solver)
if n_iter == max_iter:
warnings.warn("Maximum number of iteration %d reached. Increase it to"
" improve convergence." % max_iter, ConvergenceWarning)
return W, H, n_iter
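# Illustrative doctest-style sketch (not part of the original module; the random
# data and parameter values below are assumptions) showing a direct call to
# non_negative_factorization with the coordinate descent solver:
#
#   >>> import numpy as np
#   >>> X = np.abs(np.random.RandomState(0).randn(6, 4))
#   >>> W, H, n_iter = non_negative_factorization(
#   ...     X, n_components=2, init='random', solver='cd', random_state=0)
#   >>> W.shape, H.shape
#   ((6, 2), (2, 4))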
class NMF(BaseEstimator, TransformerMixin):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvdar' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a Projected Gradient solver (deprecated).
'cd' is a Coordinate Descent solver (recommended).
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
eta : double, default: 0.1
Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, init=None, solver='cd',
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0, shuffle=False,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
self.verbose = verbose
self.shuffle = shuffle
if sparseness is not None:
warnings.warn("Controlling regularization through the sparseness,"
" beta and eta arguments is only available"
" for 'pg' solver, which will be removed"
" in release 0.19. Use another solver with L1 or L2"
" regularization instead.", DeprecationWarning)
self.nls_max_iter = nls_max_iter
self.sparseness = sparseness
self.beta = beta
self.eta = eta
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
W, H, n_iter_ = non_negative_factorization(
X=X, W=W, H=H, n_components=self.n_components,
init=self.init, update_H=True, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
if self.solver == 'pg':
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
self.reconstruction_err_ = _safe_compute_error(X, W, H)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Attributes
----------
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
self.n_iter_ = n_iter_
return W
@deprecated("It will be removed in release 0.19. Use NMF instead."
"'pg' solver is still available until release 0.19.")
class ProjectedGradientNMF(NMF):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvdar' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a Projected Gradient solver (deprecated).
'cd' is a Coordinate Descent solver (recommended).
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
eta : double, default: 0.1
Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
super(ProjectedGradientNMF, self).__init__(
n_components=n_components, init=init, solver='pg', tol=tol,
max_iter=max_iter, random_state=random_state, alpha=alpha,
l1_ratio=l1_ratio, verbose=verbose, nls_max_iter=nls_max_iter,
sparseness=sparseness, beta=beta, eta=eta)
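# Illustrative doctest-style sketch (not part of the original module; the random
# data below is an assumption): a fitted NMF model can transform new samples by
# reusing the learned components_ matrix (internally, non_negative_factorization
# is called with update_H=False):
#
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(0)
#   >>> model = NMF(n_components=2, init='random', random_state=0)
#   >>> W = model.fit_transform(np.abs(rng.randn(10, 4)))
#   >>> W_new = model.transform(np.abs(rng.randn(3, 4)))
#   >>> W_new.shape
#   (3, 2)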
|
bsd-3-clause
|
ip-tools/ip-navigator
|
patzilla/navigator/export.py
|
1
|
34177
|
# -*- coding: utf-8 -*-
# (c) 2016-2018 Andreas Motl <[email protected]>
import os
import json
import types
import string
import logging
import pandas
import numpy
import html2text
import where
import envoy
import shutil
import tempfile
from io import BytesIO
from textwrap import dedent
from lxml import etree as ET
from bunch import bunchify, Bunch
from json.encoder import JSONEncoder
from zipfile import ZipFile, ZIP_DEFLATED
from collections import OrderedDict
from cornice.util import _JSONError
from xlsxwriter.worksheet import Worksheet
from pyramid.httpexceptions import HTTPError
from patzilla.access.generic.pdf import pdf_ziparchive_add
from patzilla.access.epo.ops.api import ops_description, get_ops_biblio_data, ops_register, ops_claims, ops_family_inpadoc
from patzilla.access.generic.exceptions import ignored
from patzilla.util.date import humanize_date_english
from patzilla.util.numbers.common import decode_patent_number, encode_epodoc_number
from patzilla.util.python import exception_traceback
log = logging.getLogger(__name__)
class Dossier(object):
summary_template = dedent(u"""
Summary
The research about »{project_name}«
started on {project_created} and was
most recently updated on {project_modified}.
{queries_count} search queries were conducted
across {datasource_count} data sources ({datasource_list})
and yielded {total_search_count} hits in total.
{total_review_count} documents were reviewed:
{rated_count} documents have been rated, {dismissed_count} have been dismissed and
{seen_count} documents were visited without putting any rating on them.
{comments_count} comments were made.
""").strip()
def __init__(self, data):
self.data = bunchify(data)
self.prepare_dataframes()
self.make_metadata()
def make_metadata(self):
self.metadata = ReportMetadata()
self.metadata.set('producer', u'IP Navigator')
# Project metadata
self.metadata.set('project_name', self.data.project.name)
self.metadata.set('project_created', humanize_date_english(self.data.project.created))
self.metadata.set('project_modified', humanize_date_english(self.data.project.modified))
if 'user' in self.data and self.data.user:
if 'fullname' in self.data.user:
self.metadata.set('author_name', self.data.user.fullname)
if 'username' in self.data.user:
self.metadata.set('author_email', self.data.user.username)
# Project-associated metadata
self.metadata.set('queries_count', len(self.data.project.queries))
self.metadata.set('comments_count', len(self.data.comments))
# Collection metadata
self.metadata.set('rated_count', len(self.data.collections.rated))
self.metadata.set('dismissed_count', len(self.data.collections.dismissed))
self.metadata.set('seen_count', len(self.data.collections.seen))
# Unique list of data sources
datasources = list(self.df_queries.datasource.unique())
self.metadata.set('datasource_list', ', '.join(datasources))
self.metadata.set('datasource_count', len(datasources))
# Totals
self.metadata.set('total_search_count', int(self.df_queries.hits.sum()))
self.metadata.set('total_review_count',
self.metadata['rated_count'] + self.metadata['dismissed_count'] + self.metadata['seen_count'])
#print 'metadata:'; pprint(self.metadata)
def prepare_dataframes(self):
# Main DataFrame for aggregating sub results
self.df_documents = pandas.DataFrame()
# Aggregate all results
for collection_name in ['rated', 'dismissed', 'seen']:
# Wrap entries into DataFrame
entries = self.data.collections[collection_name]
df = pandas.DataFrame(entries, columns=['number', 'score', 'dismiss', 'seen', 'timestamp', 'url'])
df.rename(columns={'number': 'document'}, inplace=True)
# Aggregate all DataFrame items
self.df_documents = self.df_documents.append(df)
# Amend "NaN" boolean values to "False"
self.df_documents['seen'].fillna(value=False, inplace=True)
self.df_documents['dismiss'].fillna(value=False, inplace=True)
# Cast to boolean type
self.df_documents['seen'] = self.df_documents['seen'].astype('bool')
self.df_documents['dismiss'] = self.df_documents['dismiss'].astype('bool')
# Queries
queries = map(self.query_criteria_smoother, self.data.get('queries', []))
self.df_queries = pandas.DataFrame(queries, columns=['criteria', 'query_expression', 'result_count', 'datasource', 'created'])
self.df_queries.rename(columns={'query_expression': 'expression', 'result_count': 'hits', 'created': 'timestamp'}, inplace=True)
# Comments
self.df_comments = pandas.DataFrame(self.data.get('comments'), columns=['parent', 'text', 'modified'])
self.df_comments.rename(columns={'parent': 'document', 'text': 'comment', 'modified': 'timestamp'}, inplace=True)
@staticmethod
def query_criteria_smoother(entry):
criteria = entry.get('query_data', {}).get('criteria')
entry['criteria'] = json.dumps(criteria)
try:
entry['result_count'] = int(entry['result_count'])
except:
pass
return entry
def format_with_metadata(self, template):
return string.Formatter().vformat(template, (), self.metadata)
def generate_with_metadata(self, template, **more_kwargs):
metadata = self.metadata.copy()
metadata.update(more_kwargs)
formatter = EmphasizingFormatterGenerator()
return formatter.vgenerate(template, (), metadata)
def get_summary(self):
output = self.format_with_metadata(self.summary_template)
return output
def get_metadata(self):
return self.format_with_metadata(
u'Author: {author_name} <{author_email}>\n'
u'Created: {project_created}\n'
u'Updated: {project_modified}\n'
u'Producer: {producer}')
@staticmethod
def to_csv(dataframe):
# Serialize as CSV
buffer = BytesIO()
dataframe.to_csv(buffer, index=False, encoding='utf-8')
payload = buffer.getvalue()
return payload
@staticmethod
def to_json(dataframe):
return json.dumps(dataframe.to_dict(orient='records'), indent=4, cls=PandasJSONEncoder)
#return dataframe.to_json(orient='records', date_format='iso')
def to_zip(self, request=None, options=None):
"""
u'options': {u'media': {u'biblio': False,
u'claims': False,
u'description': False,
u'pdf': True,
u'register': False},
u'report': {u'csv': True,
u'json': True,
u'pdf': False,
u'xlsx': False}},
"""
# TODO: Text representations for biblio, register, family
# TODO: PDF Extracts
options = options or bunchify({'report': {}, 'media': {}})
# Remove entries with empty/undefined document numbers
self.df_documents.dropna(subset=['document'], inplace=True)
# Reject entries with seen == True
filtered = self.df_documents[(self.df_documents.seen == False)]
documents = list(filtered.document)
buffer = BytesIO()
with ZipFile(buffer, 'w', ZIP_DEFLATED) as zipfile:
# FIXME: Add TERMS (liability waiver) and more...
zipfile.writestr('@readme.txt', u'Zip archive created by IP Navigator.')
# Add text summary
zipfile.writestr('@metadata.txt', self.get_metadata().encode('utf-8'))
zipfile.writestr('@summary.txt', self.get_summary().encode('utf-8'))
# Report files
# ------------
# Add Workbook
workbook_payload = None
if options.report.xlsx:
workbook_payload = DossierXlsx(self.data).create()
zipfile.writestr('report/@dossier.xlsx', workbook_payload)
# Add Workbook in PDF format
if options.report.pdf:
try:
zipfile.writestr('report/@dossier.pdf', DossierXlsx(self.data).to_pdf(payload=workbook_payload))
except Exception as ex:
log.error(u'Rendering dossier to PDF failed. ' \
u'Exception: {ex}\n{trace}'.format(ex=ex, trace=exception_traceback()))
# Add CSV
if options.report.csv:
zipfile.writestr('report/csv/01-queries.csv', self.to_csv(self.df_queries))
zipfile.writestr('report/csv/02-documents.csv', self.to_csv(self.df_documents))
zipfile.writestr('report/csv/03-comments.csv', self.to_csv(self.df_comments))
# Add JSON
if options.report.json:
zipfile.writestr('report/json/01-queries.json', self.to_json(self.df_queries))
zipfile.writestr('report/json/02-documents.json', self.to_json(self.df_documents))
zipfile.writestr('report/json/03-comments.json', self.to_json(self.df_comments))
# Media files
# -----------
# FIXME: This should go to some configuration setting.
fulltext_countries_excluded_ops = ['BE', 'CN', 'DD', 'DE', 'DK', 'FR', 'GR', 'HU', 'JP', 'LU', 'KR', 'RU', 'PT', 'SE', 'TR', 'SK', 'US']
# Add full PDF documents
if options.media.pdf:
pdf_ziparchive_add(zipfile, documents, path='media/pdf')
# Add XML data
# TODO: Add @report.txt for reflecting missing documents, differentiate between different XML kinds.
# TODO: Add more TEXT formats (.abstract.txt, .biblio.txt, .register.txt)
# TODO: Add ST.36 XML; e.g. from https://register.epo.org/download?number=EP08835045&tab=main&xml=st36
# via https://register.epo.org/application?number=EP08835045
# TODO: Add equivalents, e.g. http://ops.epo.org/3.1/rest-services/published-data/publication/epodoc/EP1000000/equivalents/biblio
status = OrderedDict()
for document in documents:
if not document or not document.strip():
continue
log.info(u'Data acquisition for document {document}'.format(document=document))
status.setdefault(document, OrderedDict())
patent = decode_patent_number(document)
# Add XML "bibliographic" data (full-cycle)
if options.media.biblio:
try:
biblio_payload = get_ops_biblio_data('publication', document, xml=True)
zipfile.writestr(u'media/xml/{document}.biblio.xml'.format(document=document), biblio_payload)
status[document]['biblio'] = True
except Exception as ex:
status[document]['biblio'] = False
self.handle_exception(ex, 'biblio', document)
self.clear_request_errors(request)
# Add XML "description" full text data
# OPS does not have full texts for DE, US, ...
if options.media.description:
status[document]['description'] = False
if patent.country not in fulltext_countries_excluded_ops:
try:
# Write XML
document_number = encode_epodoc_number(patent)
description_payload = ops_description(document_number, xml=True)
zipfile.writestr(u'media/xml/{document}.description.xml'.format(document=document), description_payload)
status[document]['description'] = True
# Write TEXT
with ignored():
text_payload = self.get_fulltext(description_payload, 'description')
if text_payload:
zipfile.writestr(u'media/txt/{document}.description.txt'.format(document=document), text_payload.encode('utf-8'))
except Exception as ex:
self.handle_exception(ex, 'description', document)
self.clear_request_errors(request)
# Add XML "claims" full text data
# OPS does not have full texts for DE, US, ...
if options.media.claims:
status[document]['claims'] = False
if patent.country not in fulltext_countries_excluded_ops:
try:
# Write XML
document_number = encode_epodoc_number(patent)
claims_payload = ops_claims(document_number, xml=True)
zipfile.writestr(u'media/xml/{document}.claims.xml'.format(document=document), claims_payload)
status[document]['claims'] = True
# Write TEXT
with ignored():
text_payload = self.get_fulltext(claims_payload.replace('<claim-text>', '<p>').replace('</claim-text>', '</p>'), 'claims')
if text_payload:
zipfile.writestr(u'media/txt/{document}.claims.txt'.format(document=document), text_payload.encode('utf-8'))
except Exception as ex:
self.handle_exception(ex, 'claims', document)
self.clear_request_errors(request)
# Add XML register data
if options.media.register:
try:
register_payload = ops_register('publication', document, xml=True)
zipfile.writestr(u'media/xml/{document}.register.xml'.format(document=document), register_payload)
status[document]['register'] = True
except Exception as ex:
status[document]['register'] = False
self.handle_exception(ex, 'register', document)
self.clear_request_errors(request)
# Add XML family data
if options.media.family:
try:
document_number = encode_epodoc_number(patent, options={'nokind': True})
family_payload = ops_family_inpadoc('publication', document_number, 'biblio', xml=True)
zipfile.writestr(u'media/xml/{document}.family.xml'.format(document=document), family_payload)
status[document]['family'] = True
except Exception as ex:
status[document]['family'] = False
self.handle_exception(ex, 'family', document)
self.clear_request_errors(request)
#from pprint import pprint; print '====== status:'; pprint(status)
# Generate report
# ---------------
# TODO: Format more professionally incl. generator description
# TODO: Unify with "pdf_universal_multi"
delivered_items = []
missing_items = []
for document, kinds in status.iteritems():
delivered = []
missing = []
for kind, ok in kinds.iteritems():
if ok:
delivered.append(kind)
else:
missing.append(kind)
if delivered:
item = u'{document:20}{delivered}'.format(document=document, delivered=u', '.join(delivered))
delivered_items.append(item)
if missing:
item = u'{document:20}{missing}'.format(document=document, missing=u', '.join(missing))
missing_items.append(item)
if delivered_items or missing_items:
report_template = dedent("""
Delivered artifacts ({delivered_count}):
{delivered_files}
Missing artifacts ({missing_count}):
{missing_files}
""").strip()
report = report_template.format(
delivered_count=len(delivered_items),
missing_count=len(missing_items),
delivered_files='\n'.join(delivered_items),
missing_files='\n'.join(missing_items),
)
log.info('Export report:\n{report}'.format(report=report))
zipfile.writestr('media/xml/@report.txt', report)
payload = buffer.getvalue()
return payload
def handle_exception(self, ex, service_name, document):
if isinstance(ex, (_JSONError, HTTPError)) and hasattr(ex, 'status_int') and ex.status_int == 404:
log.warning(u'XML({service_name}, {document}) not found'.format(service_name=service_name, document=document))
# Signal exception has been handled (ignored)
return True
else:
log.warning(u'XML({service_name}, {document}) failed. ' \
u'Exception:\n{trace}'.format(service_name=service_name, document=document, trace=exception_traceback()))
# Signal exception should be re-raised, maybe
return False
@staticmethod
def clear_request_errors(request):
# Reset cornice error store to prevent errors adding up on bulkyfied OPS requests
del request.errors[:]
@staticmethod
def get_fulltext(payload, what):
xpath_lang = '/ops:world-patent-data/ftxt:fulltext-documents/ftxt:fulltext-document/ftxt:{what}/@lang'.format(what=what)
xpath_content = '/ops:world-patent-data/ftxt:fulltext-documents/ftxt:fulltext-document/ftxt:{what}'.format(what=what)
namespaces = {'ops': 'http://ops.epo.org', 'ftxt': 'http://www.epo.org/fulltext'}
tree = ET.parse(BytesIO(payload))
#print 'tree:'; pprint(tree)
lang = tree.xpath(xpath_lang, namespaces=namespaces)
#print 'lang:', lang
elements = tree.xpath(xpath_content, namespaces=namespaces)
if elements:
return html2text.html2text(ET.tostring(elements[0]))
class PandasJSONEncoder(JSONEncoder):
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
# Let the base class default method raise the TypeError
return JSONEncoder.default(self, o)
"""
if isinstance(o, (numpy.bool_,)):
return bool(o)
raise TypeError(repr(o) + " is not JSON serializable")
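# Illustrative sketch (the payload below is an assumption): PandasJSONEncoder
# exists because numpy boolean scalars, as produced by the boolean DataFrame
# columns above, are not serializable by the stock JSONEncoder.
#
#   >>> import json, numpy
#   >>> json.dumps({'seen': numpy.bool_(True)}, cls=PandasJSONEncoder)
#   '{"seen": true}'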
class DossierXlsx(Dossier):
def __init__(self, data):
super(DossierXlsx, self).__init__(data)
self.writer = pandas.ExcelWriter('temp.xlsx', engine='xlsxwriter')
self.workbook = self.writer.book
add_worksheet_monkeypatch(self.workbook)
self.format_wrap_top = self.workbook.add_format()
self.format_wrap_top.set_text_wrap()
self.format_wrap_top.set_align('top')
self.format_small_font = self.workbook.add_format({'align': 'vcenter', 'font_size': 9})
self.format_small_font_align_top = self.workbook.add_format({'align': 'top', 'font_size': 9})
def create(self):
# A memory buffer as ExcelWriter storage backend
buffer = BytesIO()
self.workbook.filename = buffer
# Create "cover" sheet
self.write_cover_sheet()
# Create "queries" sheet
self.write_queries_sheet()
# Create numberlist sheets
self.write_numberlist_sheets()
# Create "comments" sheet
self.write_comments_sheet()
# Save/persist ExcelWriter model
self.writer.save()
# Get hold of buffer content
payload = buffer.getvalue()
return payload
def set_header_footer(self, worksheet):
# http://xlsxwriter.readthedocs.io/example_headers_footers.html
header = u'&LIP Navigator&RSearch report'
worksheet.set_header(header)
footer = u'&L&L&D &T&C&A&RPage &P of &N'
worksheet.set_footer(footer)
def write_cover_sheet(self):
# TODO: Histogram of data source distribution
"""
metadata_value_map = {}
for key, value in metadata.iteritems():
metadata_value_map[key] = value['value']
"""
cover_sheet = self.workbook.add_worksheet('cover')
self.set_header_footer(cover_sheet)
title = u'Dossier »{name}«'.format(name=self.data.project.name)
title_format = self.workbook.add_format({'align': 'center', 'valign': 'vcenter', 'font_size': 17, 'bold': True})
cover_sheet.merge_range('A1:I2', title, title_format)
subtitle = self.get_metadata()
subtitle_format = self.workbook.add_format({'align': 'left', 'valign': 'vcenter', 'indent': 2, 'top': 7, 'bottom': 7})
cover_sheet.merge_range('B4:H7', subtitle, subtitle_format)
# http://xlsxwriter.readthedocs.io/example_merge_rich.html
red = self.workbook.add_format({'color': 'red'})
blue = self.workbook.add_format({'color': 'blue'})
cell_format = self.workbook.add_format({'align': 'left', 'valign': 'vcenter', 'indent': 2, 'top': 7, 'bottom': 7})
cover_sheet.merge_range('B10:H28', "", cell_format)
footnote_format = self.workbook.add_format({'font_size': 9})
footnote = dedent(u"""
Please have a look at the other worksheets in
this workbook for more detailed information about
all queries, comments and document numbers
aggregated throughout the research process.
""").strip()
summary = self.generate_with_metadata(self.summary_template, emphasis=blue)
args = list(summary) + ['\n'] + [footnote_format, u'\n\n' + footnote]
args.append(cell_format)
cover_sheet.write_rich_string('B10', *args)
"""
metadata_row = 20
for key, entry in metadata.iteritems():
report_sheet.write(metadata_row, 0, entry['label'])
report_sheet.write(metadata_row, 1, entry['value'])
metadata_row += 1
"""
def write_numberlist_sheets(self):
sheets = OrderedDict()
sheets['rated'] = self.data.get('collections', {}).get('rated')
sheets['dismissed'] = self.data.get('collections', {}).get('dismissed')
sheets['seen'] = self.data.get('collections', {}).get('seen')
for sheet_name, entries in sheets.iteritems():
#print 'entries:'; pprint(entries)
if entries:
first = entries[0]
else:
first = {}
# Create pandas DataFrame
if type(first) in types.StringTypes:
df = pandas.DataFrame(entries, columns=['PN'])
elif isinstance(first, (types.DictionaryType, Bunch)):
df = pandas.DataFrame(entries, columns=['number', 'score', 'timestamp', 'url'])
df.rename(columns={'number': 'document', 'url': 'display'}, inplace=True)
# Export DataFrame to Excel
df.to_excel(self.writer, sheet_name=sheet_name, index=False)
# Set column widths
wks = self.worksheet_set_column_widths(sheet_name, 25, 15, 30, 25, cell_format=self.format_wrap_top)
wks.set_landscape()
#wks.set_column('C:C', width=19, cell_format=self.format_small_font)
self.set_header_footer(wks)
def write_queries_sheet(self):
# TODO: Add direct url links to queries
self.df_queries.to_excel(self.writer, sheet_name='queries', index=False)
wks = self.worksheet_set_column_widths('queries', 35, 35, 8, 10, 19, cell_format=self.format_wrap_top)
wks.set_landscape()
wks.set_column('E:E', width=19, cell_format=self.format_small_font_align_top)
wks.set_default_row(height=50)
wks.set_row(0, height=16)
self.set_header_footer(wks)
#self.autofit_height(wks, df.comment, default=default_row_height)
#inch = 2.54 # centimeters
#wks.set_margins(left=1.0/inch, right=1.0/inch, top=1.0/inch, bottom=1.0/inch)
def write_comments_sheet(self):
self.df_comments.to_excel(self.writer, sheet_name='comments', index=False)
#format_vcenter = self.workbook.add_format({'align': 'vcenter'})
#wks.set_row(0, cell_format=format_vcenter)
wks = self.worksheet_set_column_widths('comments', 18, 68, 19, cell_format=self.format_wrap_top)
wks.set_column('C:C', width=19, cell_format=self.format_small_font_align_top)
wks.set_landscape()
self.set_header_footer(wks)
default_row_height = 50
wks.set_default_row(height=default_row_height)
wks.set_row(0, height=16)
self.autofit_height(wks, self.df_comments.comment, default=default_row_height)
#ws.set_column('B:B', width=70, cell_format=format_wrap)
#ws.set_column('A:C', cell_format=format_wrap)
def autofit_height(self, wks, items, default=16):
font_size_estimated = 11
line_height_estimated = font_size_estimated / 10
for index, content in enumerate(items):
newline_count = content.count('\n') + 2
row_height = (font_size_estimated + line_height_estimated) * newline_count
row_height = max(row_height, default)
wks.set_row(index + 1, height=row_height)
def worksheet_set_column_widths(self, sheet_name, *widths, **kwargs):
#format_wrap = self.writer.book.add_format()
#format_wrap.set_text_wrap()
if 'cell_format' in kwargs:
cell_format = kwargs['cell_format']
else:
cell_format = self.writer.book.add_format()
#cell_format.set_text_wrap()
cell_format.set_align('vcenter')
# Set column widths
worksheet = self.writer.sheets[sheet_name]
for index, width in enumerate(widths):
colname = chr(65 + index)
colrange = '{0}:{0}'.format(colname)
worksheet.set_column(colrange, width=width, cell_format=cell_format)
#worksheet.set_column(colrange, width=width, cell_format=format_wrap)
return worksheet
def to_pdf(self, payload=None):
# TODO: Run unoconv listener in background on production system: unoconv --listener --verbose
# /Applications/LibreOffice.app/Contents/MacOS/soffice --headless --convert-to pdf --outdir /Users/amo/tmp/oo /Users/amo/Downloads/huhu_2016-07-30T22-40-48+02-00.xlsx
# /Applications/LibreOffice.app/Contents/MacOS/soffice --accept="pipe,name=navigator;urp;" --norestore --nologo --nodefault --headless
# /Applications/LibreOffice.app/Contents/MacOS/soffice --accept="socket,host=localhost,port=2002;urp;" --norestore --nologo --nodefault --headless
# /Applications/LibreOffice.app/Contents/program/python
# import pyoo
# desktop = pyoo.LazyDesktop(pipe='navigator')
# doc = desktop.open_spreadsheet('/Users/amo/Downloads/dossier_haha_2016-08-01T07-14-20+02-00 (5).xlsx')
# doc.save('hello.pdf', filter_name=pyoo.FILTER_PDF_EXPORT)
# /Applications/LibreOffice.app/Contents/program/LibreOfficePython.framework/bin/unoconv --listener --verbose
# /Applications/LibreOffice.app/Contents/program/LibreOfficePython.framework/bin/unoconv --format=pdf --output=~/Downloads --verbose ~/Downloads/dossier_haha_2016-08-01T07-14-20+02-00.xlsx
# Find "unoconv" program
unoconv = self.find_unoconv()
if not unoconv:
raise KeyError('Could not find "unoconv" on system, aborting PDF conversion.')
# Generate Office Open XML Workbook
if not payload:
payload = self.create()
# Save to temporary file
xlsx_file = tempfile.NamedTemporaryFile(suffix='.xlsx', delete=False)
xlsx_file.write(payload)
xlsx_file.flush()
# Create temporary path for PDF conversion
#pdf_path = tempfile.mkdtemp()
#pdf_path = os.path.dirname(xlsx_file.name)
pdf_path = xlsx_file.name.replace('.xlsx', '.pdf')
# Run conversion command ("unoconv", based on Open Office)
# "aptitude install unoconv" should get you started
"""
-c, --connection=string use a custom connection string
-l, --listener start a permanent listener to use by unoconv clients
-n, --no-launch fail if no listener is found (default: launch one)
"""
command = [[unoconv, '--format=pdf', '--output={output}'.format(output=pdf_path), '--verbose', '-vvvvv', '--timeout=10', xlsx_file.name]]
process = envoy.run(command, timeout=30, env={'HOME': '/tmp'})
# Debugging
#print 'status:', process.status_code
#print 'command:', process.command
#print 'out:', process.std_out
#print 'err:', process.std_err
log.info('STDERR:\n{}'.format(process.std_err))
if process.status_code == 0:
#pdf_name = os.path.join(pdf_path, os.path.basename(xlsx_file.name).replace('.xlsx', '.pdf'))
payload = file(pdf_path, 'rb').read()
#shutil.rmtree(pdf_path)
os.unlink(pdf_path)
return payload
else:
log.error('XLSX->PDF conversion failed, status={status}, command={command} ({command_cp}). Error:\n{error}'.format(
status=process.status_code, command=process.command, command_cp=' '.join(process.command), error=process.std_err))
raise OSError('XLSX->PDF conversion failed')
@staticmethod
def find_unoconv():
# Debian: aptitude install unoconv
# Mac OS X: brew install unoconv
# TODO: Make unoconv configurable via ini file
candidates = [
# LibreOffice 4.x on Mac OSX 10.7, YMMV
'/Applications/LibreOffice.app/Contents/program/LibreOfficePython.framework/bin/unoconv',
'/usr/bin/unoconv',
]
candidates += where.where('unoconv')
for candidate in candidates:
if os.path.isfile(candidate):
return candidate
class ReportMetadata(OrderedDict):
def set(self, key, value):
self[key] = value
# https://stackoverflow.com/questions/17215400/python-format-string-unused-named-arguments/17215533#17215533
def __missing__(self, key):
return u'n/a'
# Machinery for monkeypatching XlsxWriter's Worksheet's ``write_url`` method
# to deduce a link title from the url automatically using ``os.path.basename(url)``.
# Save vanilla method
Worksheet.write_url_dist = Worksheet.write_url
def write_url_deduce_title(self, row, col, url, cell_format=None, string=None, tip=None):
if string is None:
string = os.path.basename(url)
if tip is None:
tip = u'Open "{name}" in Patent Navigator'.format(name=string)
return self.write_url_dist(row, col, url, cell_format=cell_format, string=string, tip=tip)
def workbook_add_sheet_hook(self, name=None):
worksheet = self._add_sheet(name, is_chartsheet=False)
# Patch "write_url" function
worksheet.write_url = lambda *args, **kwargs: write_url_deduce_title(worksheet, *args, **kwargs)
return worksheet
def add_worksheet_monkeypatch(workbook):
workbook.add_worksheet = lambda *args, **kwargs: workbook_add_sheet_hook(workbook, *args, **kwargs)
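# Illustrative sketch (filename and URL are assumptions, not from the original
# code): after applying add_worksheet_monkeypatch, write_url deduces the cell
# text from the URL basename and adds an "Open ... in Patent Navigator" tooltip.
#
#   >>> import xlsxwriter
#   >>> workbook = xlsxwriter.Workbook('example.xlsx')
#   >>> add_worksheet_monkeypatch(workbook)
#   >>> sheet = workbook.add_worksheet('links')
#   >>> _ = sheet.write_url(0, 0, 'https://example.org/patents/EP0666666')
#   >>> workbook.close()  # cell A1 reads "EP0666666"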
class EmphasizingFormatterGenerator(string.Formatter):
def vgenerate(self, format_string, args, kwargs):
used_args = set()
result = self._vgenerate(format_string, args, kwargs, used_args, 2)
self.check_unused_args(used_args, args, kwargs)
return result
def _vgenerate(self, format_string, args, kwargs, used_args, recursion_depth):
if recursion_depth < 0:
raise ValueError('Max string recursion exceeded')
for literal_text, field_name, format_spec, conversion in\
self.parse(format_string):
# output the literal text
if literal_text:
yield literal_text
# if there's a field, output it
if field_name is not None:
# this is some markup, find the object and do
# the formatting
# given the field_name, find the object it references
# and the argument it came from
obj, arg_used = self.get_field(field_name, args, kwargs)
used_args.add(arg_used)
# do any conversion on the resulting object
obj = self.convert_field(obj, conversion)
# expand the format spec, if needed
format_spec = self._vformat(format_spec, args, kwargs,
used_args, recursion_depth-1)
# format the object and append to the result
if 'emphasis' in kwargs:
yield kwargs['emphasis']
yield self.format_field(obj, format_spec)
|
agpl-3.0
|
mehdidc/scikit-learn
|
sklearn/covariance/tests/test_graph_lasso.py
|
37
|
2901
|
""" Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .01, .1):
covs = dict()
for method in ('cd', 'lars'):
cov_, _, costs = graph_lasso(emp_cov, alpha=alpha,
return_costs=True)
covs[method] = cov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'])
# Smoke test the estimator
model = GraphLasso(alpha=.1).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'])
assert_array_almost_equal(model.covariance_, covs['lars'])
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=3, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
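# Illustrative doctest-style sketch (assumed data; not one of the original
# tests): graph_lasso returns the regularized covariance estimate together with
# its sparse inverse (the precision matrix) for a given alpha.
#
#   >>> rng = check_random_state(0)
#   >>> X = rng.multivariate_normal(np.zeros(3), np.eye(3), size=50)
#   >>> cov_, prec_ = graph_lasso(empirical_covariance(X), alpha=0.1)
#   >>> cov_.shape, prec_.shape
#   ((3, 3), (3, 3))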
|
bsd-3-clause
|
bitemyapp/ggplot
|
docs/conf.py
|
13
|
8495
|
# -*- coding: utf-8 -*-
#
# pandas documentation build configuration file, created by
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('.'))
# currently not used
#sys.path.insert(0, os.path.abspath('sphinxext'))
# Use the source code and not an installed version
sys.path.insert(0, os.path.abspath('../'))
sys.path.extend([
# numpy standard doc extensions
os.path.join(os.path.dirname(__file__),
'..', '../..',
'sphinxext')
])
# -- General configuration -----------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.todo',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.autosummary',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'numpydoc', # used to parse numpy-style docstrings for autodoc
]
# prevent some errors
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ggplot'
copyright = u'2013, yhat'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# importing ggplot here has some side effects (plots pop up during
# doc building), so use the version extracted from setup.py here as well.
def extract_version():
"""
Extracts version values from the main matplotlib __init__.py and
returns them as a dictionary.
"""
with open('../ggplot/__init__.py') as fd:
for line in fd.readlines():
if (line.startswith('__version__')):
exec(line.strip())
return locals()["__version__"]
version = '%s' % extract_version()
# The full version, including alpha/beta/rc tags.
release = version
# JP: added from sphinxdocs
autosummary_generate = True
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'nature_with_gtoc'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'statsmodels.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'ggplot'
# -- Options for LaTeX output --------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ggplot.tex',
u'ggplot: Elegant Graphics for Data Analysis, now also in python',
u'yhat', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'statsmodels': ('http://statsmodels.sourceforge.net/devel/', None),
'python': ('http://docs.python.org/', None)
}
# extlinks alias
extlinks = {'issue': ('https://github.com/yhat/ggplot/issues/%s',
'GH')}
|
bsd-2-clause
|
ycaihua/scikit-learn
|
sklearn/gaussian_process/tests/test_gaussian_process.py
|
7
|
6830
|
"""
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
"""
MLE estimation of a one-dimensional Gaussian Process model.
Check random start optimization.
Test the interpolating property.
"""
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
"""
MLE estimation of a two-dimensional Gaussian Process model accounting for
anisotropy. Check random start optimization.
Test the interpolating property.
"""
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
assert_true(np.all(gp.theta_ >= thetaL)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
"""
MLE estimation of a two-dimensional Gaussian Process model accounting for
anisotropy. Check random start optimization.
Test the GP interpolation for 2D output
"""
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
"""
Repeat test_1d and test_2d for several built-in correlation
models specified as strings.
"""
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
"""
Repeat test_1d and test_2d with given regression weights (beta0) for
different regression models (Ordinary Kriging).
"""
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
"""
Test that an increasing number of random-starts of GP fitting only
increases the reduced likelihood function of the optimal theta.
"""
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
|
bsd-3-clause
|
codevlabs/pandashells
|
pandashells/test/module_checker_lib_tests.py
|
7
|
1443
|
#! /usr/bin/env python
from unittest import TestCase
from pandashells.lib.module_checker_lib import check_for_modules
from pandashells.lib import module_checker_lib
from mock import patch
class ModuleCheckerTests(TestCase):
def setUp(self):
module_checker_lib.CMD_DICT['fakemodule1'] = 'pip install fakemodule1'
module_checker_lib.CMD_DICT['fakemodule2'] = 'pip install fakemodule2'
module_checker_lib.CMD_DICT['os'] = 'part of standard module'
def test_check_for_modules_unrecognized(self):
"""
check_for_modules() raises error when module is unrecognized
"""
with self.assertRaises(ValueError):
check_for_modules(['not_a_module'])
@patch('pandashells.lib.module_checker_lib.importlib.import_module')
def test_check_for_modules_no_modules(self, import_module_mock):
"""
check_for_modules() does nothing when module list is empty
"""
check_for_modules([])
self.assertFalse(import_module_mock.called)
def test_check_for_modules_existing_module(self):
"""
check_for_modules() successfully finds existing module
"""
check_for_modules(['os'])
def test_check_for_modules_bad(self):
"""
check_for_modules() correctly identifies missing modules
"""
with self.assertRaises(ImportError):
check_for_modules(['fakemodule1', 'fakemodule2'])
|
bsd-2-clause
|
Barmaley-exe/scikit-learn
|
examples/decomposition/plot_sparse_coding.py
|
247
|
3846
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It
therefore shows how much adding atoms of different widths matters, which
motivates learning the dictionary to best fit your type of signals. The
richer dictionary on the right is not larger in size; heavier subsampling is
performed to keep it on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
|
bsd-3-clause
|
clemkoa/scikit-learn
|
sklearn/linear_model/tests/test_sparse_coordinate_descent.py
|
94
|
10801
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
    # this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
def test_same_multiple_output_sparse_dense():
for normalize in [True, False]:
l = ElasticNet(normalize=normalize)
X = [[0, 1, 2, 3, 4],
[0, 2, 5, 8, 11],
[9, 10, 11, 12, 13],
[10, 11, 12, 13, 14]]
y = [[1, 2, 3, 4, 5],
[1, 3, 6, 9, 12],
[10, 11, 12, 13, 14],
[11, 12, 13, 14, 15]]
ignore_warnings(l.fit)(X, y)
sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1)
predict_dense = l.predict(sample)
l_sp = ElasticNet(normalize=normalize)
X_sp = sp.coo_matrix(X)
ignore_warnings(l_sp.fit)(X_sp, y)
sample_sparse = sp.coo_matrix(sample)
predict_sparse = l_sp.predict(sample_sparse)
assert_array_almost_equal(predict_sparse, predict_dense)
|
bsd-3-clause
|
jmetzen/scikit-learn
|
sklearn/utils/setup.py
|
296
|
2884
|
import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
google-research/text-to-text-transfer-transformer
|
t5/evaluation/metrics.py
|
1
|
19383
|
# Copyright 2021 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for computing metrics.
Every function must accept a list of targets and a list of predictions and
return a dict of metrics.
Functions should assume all text inputs are unicode strings.
"""
import collections
import itertools
import re
import string
from typing import Dict, Mapping, Optional, Sequence, Tuple, Union
from absl import logging
import editdistance
import numpy as np
import sacrebleu
import scipy.stats
import sklearn.metrics
from t5.evaluation import qa_utils
from rouge_score import rouge_scorer
from rouge_score import scoring
def bleu(targets, predictions):
"""Computes BLEU score.
Args:
targets: list of strings or list of list of strings if multiple references
are present.
predictions: list of strings
Returns:
bleu_score across all targets and predictions
"""
if isinstance(targets[0], list):
targets = [[x for x in target] for target in targets]
else:
# Need to wrap targets in another list for corpus_bleu.
targets = [targets]
bleu_score = sacrebleu.corpus_bleu(predictions, targets,
smooth_method="exp",
smooth_value=0.0,
force=False,
lowercase=False,
tokenize="intl",
use_effective_order=False)
return {"bleu": bleu_score.score}
def rouge(targets, predictions, score_keys=None):
"""Computes rouge score.
Args:
targets: list of strings
predictions: list of strings
score_keys: list of strings with the keys to compute.
Returns:
dict with score_key: rouge score across all targets and predictions
"""
if score_keys is None:
score_keys = ["rouge1", "rouge2", "rougeLsum"]
scorer = rouge_scorer.RougeScorer(score_keys)
aggregator = scoring.BootstrapAggregator()
def _prepare_summary(summary):
# Make sure the summary is not bytes-type
# Add newlines between sentences so that rougeLsum is computed correctly.
summary = summary.replace(" . ", " .\n")
return summary
for prediction, target in zip(predictions, targets):
target = _prepare_summary(target)
prediction = _prepare_summary(prediction)
aggregator.add_scores(scorer.score(target=target, prediction=prediction))
result = aggregator.aggregate()
for key in score_keys:
logging.info(
"%s = %.2f, 95%% confidence [%.2f, %.2f]",
key,
result[key].mid.fmeasure*100,
result[key].low.fmeasure*100,
result[key].high.fmeasure*100,
)
return {key: result[key].mid.fmeasure*100 for key in score_keys}
def span_squad(targets, predictions):
"""Computes SQuAD metrics for span prediction tasks.
Uses qa metric function to compute EM and F1 score.
Args:
targets: list of dict of answers (list of strings) and context (string)
    predictions: list of strings, each string contains the space tokenized
ids in the format: "start: 3 end: 6"
Returns:
dict with score_key: squad score across all targets and predictions
"""
assert len(targets) == len(predictions)
def space_tok(s):
return re.sub(r"\W", " ", s).split()
def get_answer_text_from_context(context, answer_tokens):
"""Find the answer in the context given the answer tokens."""
# In the initial training iterations, the model can output garbage.
# Returning an empty string in such cases.
if len(answer_tokens) < 4:
return ""
# Model sometimes predicts words instead of numbers in the answer. Return
# an empty string in that case.
try:
start_index = int(answer_tokens[1])
end_index = int(answer_tokens[3])
except ValueError:
return ""
return " ".join(context[start_index:end_index+1])
contexts = [space_tok(t["context"]) for t in targets]
answers = [t["answers"] for t in targets]
predictions = [space_tok(p) for p in predictions]
final_predictions = [
get_answer_text_from_context(c, p) for c, p in zip(contexts, predictions)
]
return squad(answers, final_predictions)
def squad(targets, predictions):
"""Computes SQuAD metrics, maximizing over answers per question.
Args:
targets: list of lists of strings
predictions: list of strings
Returns:
dict with score_key: squad score across all targets and predictions
"""
targets = [[qa_utils.normalize_squad(t) for t in u] for u in targets]
predictions = [qa_utils.normalize_squad(p) for p in predictions]
return qa_utils.qa_metrics(targets, predictions)
def trivia_qa(targets, predictions):
"""Computes TriviaQA metrics, maximizing over answers per question.
Args:
targets: list of lists of strings
predictions: list of strings
Returns:
dict with score_key: squad score across all targets and predictions
"""
targets = [[qa_utils.normalize_trivia_qa(t) for t in u] for u in targets]
predictions = [qa_utils.normalize_trivia_qa(p) for p in predictions]
return qa_utils.qa_metrics(targets, predictions)
def accuracy(targets, predictions):
return {"accuracy": 100*sklearn.metrics.accuracy_score(targets, predictions)}
def sequence_accuracy(targets, predictions):
"""Computes per-sequence accuracy.
For each example, returns 1.0 if the target sequence EXACTLY matches the
predicted sequence. Else, 0.0.
Args:
targets: list of strings
predictions: list of strings
Returns:
float. Average sequence-level accuracy.
"""
assert len(targets) == len(predictions)
seq_acc = 100 * np.mean([p == t for p, t in zip(predictions, targets)])
logging.info("sequence_accuracy = %.2f", seq_acc)
return {"sequence_accuracy": seq_acc}
def pearson_corrcoef(targets, predictions):
"""Pearson correlation coefficient."""
return {"pearson_corrcoef":
100 * scipy.stats.pearsonr(targets, predictions)[0]}
def spearman_corrcoef(targets, predictions):
"""Spearman correlation coefficient."""
return {"spearman_corrcoef":
100 * scipy.stats.spearmanr(targets, predictions)[0]}
def mean_multiclass_f1(num_classes, **metric_fn_kwargs):
"""Computes the unweighted average of the F1 per class."""
return sklearn_metrics_wrapper(
"fbeta_score",
metric_dict_str="mean_%dclass_f1" % num_classes,
metric_post_process_fn=lambda x: 100 * x,
beta=1,
labels=range(num_classes),
average="macro",
**metric_fn_kwargs)
def exact_match(targets, predictions):
"""Computes whether the targets match predictions exactly."""
return {"exact_match": 100 * float(np.array_equal(targets, predictions))}
def f1_score_with_invalid(targets, predictions):
"""Compute F1 score, but any prediction != 0 or 1 is counted as incorrect.
Args:
targets: np.ndarray of targets, either 0 or 1
predictions: np.ndarray of predictions, any integer value
Returns:
F1 score, where any prediction != 0 or 1 is counted as wrong.
"""
targets, predictions = np.asarray(targets), np.asarray(predictions)
# Get indices of invalid predictions
invalid_idx_mask = np.logical_and(predictions != 0, predictions != 1)
# For any prediction != 0 or 1, set it to the opposite of what the target is
predictions[invalid_idx_mask] = 1 - targets[invalid_idx_mask]
return {"f1": 100 * sklearn.metrics.f1_score(targets, predictions)}
def mean_group_metric(metric_fn, group_key="group", value_key="value"):
"""Returns a metric that averages `metric_fn` on sub-groups of results.
The sub-groups are defined by aggregating results (targets and predictions)
by accessing the feature specified by `group_key` in the target dicts.
**WARNING**: Using this function can produce unreliable results if you do not
pass in full groups. For example, if you evaluate over a random subsample of a
validation set and do not retain all of the examples in each group, you may
get results which aren't directly comparable to using the full validation set.
Args:
metric_fn: function, the metric to compute on the subgroups.
group_key: string, the key for the grouping value in the target dictionary.
value_key: string, the key for the value in the dictionaries.
"""
def my_metric(targets, predictions):
"""Computes mean of `metric_fn` over subgroups of results."""
grouped_values = collections.defaultdict(lambda: ([], []))
for targ, pred in zip(targets, predictions):
g = targ[group_key]
grouped_values[g][0].append(targ[value_key])
grouped_values[g][1].append(pred[value_key])
group_scores = collections.defaultdict(list)
for (targets, predictions) in grouped_values.values():
for metric, score in metric_fn(targets, predictions).items():
group_scores[metric].append(score)
return {metric: np.mean(scores) for metric, scores in group_scores.items()}
return my_metric
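# Illustrative sketch (not part of the original module): wrapping `accuracy`
# with `mean_group_metric` so it is averaged per group. The inputs below are
# hypothetical.
def _example_mean_group_metric():
  grouped_accuracy = mean_group_metric(accuracy)
  targets = [{"group": "a", "value": 1}, {"group": "b", "value": 0}]
  predictions = [{"value": 1}, {"value": 1}]
  # Group "a" scores 100.0 and group "b" scores 0.0, so the mean is 50.0.
  return grouped_accuracy(targets, predictions)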
def multirc_f1_over_all_answers(targets, predictions):
"""Special metric for MultiRC which computes F1 score over all examples.
This is necessary because the targets/predictions for MultiRC are dicts and
the f1_score_with_invalid expects a list of True/False labels, not dicts. As
a result we just need to key in the "value" for each of the example dicts
before feeding into f1_score_with_invalid.
Args:
targets: list of dicts, where each dict has a "value" key.
predictions: list of dicts, where each dict has a "value" key.
Returns:
F1 score over values, where any prediction != 0 or 1 is counted as wrong.
"""
return f1_score_with_invalid(
[t["value"] for t in targets], [p["value"] for p in predictions]
)
def auc(targets, predictions, targets_threshold=None):
"""Compute Area Under the ROC and PR curves.
ROC - Receiver Operating Characteristic
PR - Precision and Recall
Args:
targets: np.ndarray of targets, either 0 or 1, or continuous values.
predictions: np.ndarray of predictions, any value.
targets_threshold: float, if target values are continuous values, this
threshold binarizes them.
Returns:
A dictionary with AUC-ROC and AUC-PR scores.
"""
if targets_threshold is not None:
targets = np.array(targets)
targets = np.where(targets < targets_threshold,
np.zeros_like(targets, dtype=np.int32),
np.ones_like(targets, dtype=np.int32))
return {
"auc-roc": sklearn.metrics.roc_auc_score(targets, predictions),
"auc-pr": sklearn.metrics.average_precision_score(targets, predictions),
}
def sklearn_metrics_wrapper(metric_str,
metric_dict_str=None,
metric_post_process_fn=None,
**metric_fn_kwargs):
"""Wraps any sklearn.metric function and returns a t5 metric function.
Args:
metric_str: string, the function from `sklearn.metrics` to use.
metric_dict_str: optional string, if not specified `metric_str` is used as
the key in the returned dictionary.
metric_post_process_fn: callable, if specified the final computed metric
will be passed through this.
**metric_fn_kwargs: kwargs, passed to the metric function we are calling.
Returns:
the function that calculates the metric in a dict.
"""
if not hasattr(sklearn.metrics, metric_str):
raise ValueError("sklearn.metrics does not have: %s" % metric_str)
def fn(targets, predictions):
metric_fn = getattr(sklearn.metrics, metric_str)
metric_val = metric_fn(targets, predictions, **metric_fn_kwargs)
if metric_post_process_fn is not None:
metric_val = metric_post_process_fn(metric_val)
return {metric_dict_str or metric_str: metric_val}
return fn
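# Illustrative sketch (not part of the original module): wrapping an existing
# sklearn metric by name. `matthews_corrcoef` is a real sklearn.metrics
# function; the scaling below is chosen only for demonstration.
def _example_sklearn_metrics_wrapper():
  matthews = sklearn_metrics_wrapper(
      "matthews_corrcoef", metric_post_process_fn=lambda x: 100 * x)
  return matthews([0, 1, 1, 0], [0, 1, 0, 0])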
def rank_classification(
targets: Sequence[Tuple[Sequence[int], bool, float]],
scores: Sequence[float],
num_classes: Optional[int] = None) -> Dict[str, Union[float, int]]:
"""Computes standard metrics classification based on log likelihood ranking.
This metric is intended to be used along with the `rank_classification`
preprocessor and postprocessor. Each example is scored (by log likelihood)
for every possible label, and the label with the best score is selected as the
prediction.
In the case of multiple labels, a prediction matching any will be considered
correct.
For problems with two labels, AUC-pr and AUC-roc retrieval metrics will be
reported for the positive class, which is assumed to have an 'idx' of 1. If
more labels are present, only accuracy and F-1 will be reported.
Args:
targets: list of tuples, the 'idx', 'is_correct' and 'weight' fields from
ground truth examples.
scores: list of float, a flat list of log likelihood scores for each
possible label for each example.
num_classes: int or None, the number of possible classes for the label or
None if the number of classes vary.
Returns:
Accuracy, f1, and AUC scores.
Raises:
ValueError: if `targets` is not a sequence of 3-tuples.
"""
assert len(targets) == len(scores)
if len(targets[0]) != 3:
raise ValueError(
f"`targets` should contain 3 elements but has {len(targets[0])}.")
idx_0 = targets[0][0]
if not hasattr(idx_0, "__len__") or len(idx_0) != 2:
raise ValueError(
"The first element of `targets` ('idx') should be 2-dimensional. "
f"Got {idx_0}.")
# Sort by 'idx' since the function relies on this assumption.
# ((idx, is_correct, weight), score)
get_idx = lambda x: x[0][0]
targets, scores = zip(*sorted(zip(targets, scores), key=get_idx))
if not num_classes:
# Assuming variable classes. Can only compute accuracy.
num_correct = 0
total = 0
# (((input idx, output idx), is_correct, weight), score)
get_grp = lambda x: x[0][0][0]
for _, grp in itertools.groupby(zip(targets, scores), get_grp):
exs, log_likelihoods = zip(*grp)
prediction = np.argmax(log_likelihoods)
weights = exs[prediction][2]
num_correct += exs[prediction][1] * weights
total += weights
return {"accuracy": 100 * num_correct / total}
assert len(targets) % num_classes == 0, f"{len(targets)} % {num_classes} != 0"
labels_indicator = np.array([is_correct for _, is_correct, _ in targets
]).reshape((-1, num_classes))
weights = np.array([weight for _, _, weight in targets]).reshape(
(-1, num_classes))[:, 0]
log_likelihoods = np.array(scores, np.float32).reshape((-1, num_classes))
predictions = log_likelihoods.argmax(-1)
if np.any(labels_indicator.sum(axis=-1) > 1):
# multiple-answer case
logging.info(
"Multiple labels detected. Predictions matching any label will be "
"considered correct.")
num_examples = len(labels_indicator)
return {
"accuracy": (100 * np.average(
labels_indicator[np.arange(num_examples), predictions],
weights=weights))
}
predictions_indicator = np.eye(num_classes)[predictions]
def exp_normalize(x):
b = x.max(-1)[:, np.newaxis]
y = np.exp(x - b)
return y / y.sum(-1)[:, np.newaxis]
probs = exp_normalize(log_likelihoods)
metrics = {
"accuracy":
100 * sklearn.metrics.accuracy_score(
labels_indicator, predictions_indicator, sample_weight=weights),
}
if num_classes > 2:
metrics.update(
mean_multiclass_f1(num_classes,
sample_weight=weights)(labels_indicator,
predictions_indicator))
logging.warning("AUC-pr and AUC-roc are not supported when num_classes > 2")
else:
metrics.update({
"f1":
100 * sklearn.metrics.f1_score(
labels_indicator.argmax(-1), predictions, sample_weight=weights)
})
labels_indicator = labels_indicator[:, 1]
probs = probs[:, 1]
metrics.update({
"auc-roc":
100 * sklearn.metrics.roc_auc_score(
labels_indicator, probs, multi_class="ovr",
sample_weight=weights, average="macro"),
"auc-pr":
100 * sklearn.metrics.average_precision_score(
labels_indicator, probs, sample_weight=weights,
average="macro"),
})
return metrics
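# Illustrative sketch (not part of the original module): calling
# `rank_classification` with a variable number of classes (num_classes=None),
# in which case only accuracy is reported. The 'idx' tuples, correctness
# flags, weights and scores below are hypothetical.
def _example_rank_classification():
  targets = [
      ((0, 0), False, 1.0),
      ((0, 1), True, 1.0),
      ((1, 0), True, 1.0),
      ((1, 1), False, 1.0),
  ]
  scores = [-2.0, -0.5, -0.3, -1.0]
  # Example 0 selects label 1 (score -0.5) and example 1 selects label 0
  # (score -0.3); both are marked correct, so accuracy is 100.0.
  return rank_classification(targets, scores)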
def _coqa_tokenize(inp: str) -> Sequence[str]:
"""Normalize English text and tokenize into words based on spaces.
Adapted from official evaluation tokenization at
https://stanfordnlp.github.io/coqa/.
Args:
inp: string.
Returns:
Tokenization of normalized text as List[str]
"""
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def normalize_whitespace(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
return normalize_whitespace(remove_articles(remove_punc(inp.lower()))).split()
def _sequence_f1(target_tokens: Sequence[str],
prediction_tokens: Sequence[str]) -> float:
"""Given target and prediction tokens, return token-wise F1 score."""
if not (target_tokens or prediction_tokens):
return int(target_tokens == prediction_tokens)
common_token_counts = (
collections.Counter(target_tokens) &
collections.Counter(prediction_tokens))
sum_common = sum(common_token_counts.values())
if sum_common == 0:
return 0
precision = 1.0 * sum_common / len(prediction_tokens)
recall = 1.0 * sum_common / len(target_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def coqa_f1(
targets: Sequence[Sequence[str]], predictions: Sequence[str]
) -> Mapping[str, float]:
"""Return mean sequence F1 score over all QA turns."""
f1s = []
for (t, p) in zip(targets, predictions):
assert len(t) == 1
target_tokens = _coqa_tokenize(t[0])
prediction_tokens = _coqa_tokenize(p)
f1s.append(_sequence_f1(target_tokens, prediction_tokens))
return {"f1": np.mean(np.array(f1s))}
def edit_distance(targets, predictions, lower=True):
"""Word-level edit distance between targets and predictions."""
edit_distances = []
for pred, target in zip(predictions, targets):
if lower:
pred = pred.lower()
target = target.lower()
# For simplicity, use regex-based tokenization that treats each
# contiguous chunk of characters matched by \w as a word.
pred = re.split("[^\\w]", pred)
target = re.split("[^\\w]", target)
edit_distances.append(editdistance.distance(pred, target))
return {"min_edit": min(edit_distances),
"max_edit": max(edit_distances),
"mean_edit": np.mean(edit_distances),
"median_edit": np.median(edit_distances),
"sum_edit": sum(edit_distances)}
|
apache-2.0
|
nddsg/TreeDecomps
|
xplodnTree/core/baseball.py
|
1
|
2569
|
"""
Recomputes probabilities
"""
import pandas as pd
from utils import Info
from pprint import pprint as pp
from utils import listify_rhs
def dbg(in_str):
print ("{}".format(in_str))
def recompute_probabilities(pd_data_frame):
'''
recompute probabilities
:param pd_data_frame: pd.DataFrame
:return: df
'''
Info("recompute_probabilities")
df = pd_data_frame
df = df.reset_index(drop=True)
if df.columns[0]=='rnbr':
df['rnogrp'] = df["rnbr"].apply(lambda x: x.split(".")[0])
else:
df['rnogrp'] = df[0].apply(lambda x: x.split(".")[0])
gb = df.groupby(['rnogrp']).groups
for k,v in gb.items():
kcntr = 0
# print k, v
# print
for r in v:
prob_f = df["prob"].loc[r]/sum([df["prob"].loc[x] for x in v])
# df.loc[r] = pd.Series(["{}.{}".format(k, kcntr), list(df["lhs"].loc[r]), \
# df["rhs"].loc[r], prob_f])
df.set_value(v, 'prob', prob_f)
kcntr += 1
df.drop('rnogrp', axis=1, inplace=True)
return df
# TODO: WORKING on getting this to handle the mdf being passed
# as argument
# def listify_rhs(rhs_obj):
# print type (rhs_obj[0]), len(rhs_obj[0])
def recompute_probabilities_two(pd_data_frame):
df = pd_data_frame
df['rnogrp'] = df[0].apply(lambda x: x.split(".")[0])
gb = df.groupby(['rnogrp']).groups
for k,v in gb.items():
        print(k)
        print(" ", len(gb[k]))
ndf = df
for k,v in gb.items():
kcntr = 0
for r in v:
ndf.set_value(r, [0], "{}.{}".format(k, kcntr))
ndf.set_value(r, [3], df[[3]].loc[r].values[0]/float(len(v)))
# rhs = df[[2]].loc[r].values[0]
# ndf.loc[r]= pd.Series(["{}.{}".format(k, kcntr),
# df[[1]].loc[r].values, #list(df[[1]].loc[r].values)[0],
# listify_rhs(rhs[0]),
# df[[3]].loc[r].values[0]/float(len(v))])
# # ndf.loc[r] = pd.Series( ["{}.{}".format(k, kcntr),
# df[[1]].loc[r].values, #list(df[[1]].loc[r].values)[0],
# listify_rhs(rhs[0]),
# df[[3]].loc[r].values[0]/float(len(v))])
kcntr += 1
ndf = ndf.drop('rnogrp', axis=1)
    print(ndf.head())
return ndf
if __name__ == '__main__':
from sys import argv,exit
import os
if len(argv) <=1:
        print('--> needs argument, provide a *.prs filename')
exit(1)
fname = argv[1]
if not os.path.exists(fname):
        print('file does not exist, try a new file')
exit(1)
df = pd.read_csv(fname, header=None, sep="\t")
df = recompute_probabilities(df)
df.to_csv(fname.split('.')[0]+"_rc.tsv", header=False, index=False, sep="\t") # rcprs = recomputed prod rules
    if os.path.exists(fname.split('.')[0]+"_rc.tsv"): print('Saved file:', fname.split('.')[0]+"_rc.tsv")
|
mit
|
jzt5132/scikit-learn
|
benchmarks/bench_plot_approximate_neighbors.py
|
244
|
6011
|
"""
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed-up of LSHForest queries compared to the brute-force
method of exact nearest neighbors is measured for the
aforementioned settings. In general, the speed-up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
|
bsd-3-clause
|
gfyoung/pandas
|
pandas/tests/arrays/sparse/test_dtype.py
|
2
|
5658
|
import re
import numpy as np
import pytest
import pandas as pd
from pandas.core.arrays.sparse import SparseDtype
@pytest.mark.parametrize(
"dtype, fill_value",
[
("int", 0),
("float", np.nan),
("bool", False),
("object", np.nan),
("datetime64[ns]", pd.NaT),
("timedelta64[ns]", pd.NaT),
],
)
def test_inferred_dtype(dtype, fill_value):
sparse_dtype = SparseDtype(dtype)
result = sparse_dtype.fill_value
if pd.isna(fill_value):
assert pd.isna(result) and type(result) == type(fill_value)
else:
assert result == fill_value
def test_from_sparse_dtype():
dtype = SparseDtype("float", 0)
result = SparseDtype(dtype)
assert result.fill_value == 0
def test_from_sparse_dtype_fill_value():
dtype = SparseDtype("int", 1)
result = SparseDtype(dtype, fill_value=2)
expected = SparseDtype("int", 2)
assert result == expected
@pytest.mark.parametrize(
"dtype, fill_value",
[
("int", None),
("float", None),
("bool", None),
("object", None),
("datetime64[ns]", None),
("timedelta64[ns]", None),
("int", np.nan),
("float", 0),
],
)
def test_equal(dtype, fill_value):
a = SparseDtype(dtype, fill_value)
b = SparseDtype(dtype, fill_value)
assert a == b
assert b == a
def test_nans_equal():
a = SparseDtype(float, float("nan"))
b = SparseDtype(float, np.nan)
assert a == b
assert b == a
@pytest.mark.parametrize(
"a, b",
[
(SparseDtype("float64"), SparseDtype("float32")),
(SparseDtype("float64"), SparseDtype("float64", 0)),
(SparseDtype("float64"), SparseDtype("datetime64[ns]", np.nan)),
(SparseDtype(int, pd.NaT), SparseDtype(float, pd.NaT)),
(SparseDtype("float64"), np.dtype("float64")),
],
)
def test_not_equal(a, b):
assert a != b
def test_construct_from_string_raises():
with pytest.raises(
TypeError, match="Cannot construct a 'SparseDtype' from 'not a dtype'"
):
SparseDtype.construct_from_string("not a dtype")
@pytest.mark.parametrize(
"dtype, expected",
[
(SparseDtype(int), True),
(SparseDtype(float), True),
(SparseDtype(bool), True),
(SparseDtype(object), False),
(SparseDtype(str), False),
],
)
def test_is_numeric(dtype, expected):
assert dtype._is_numeric is expected
def test_str_uses_object():
result = SparseDtype(str).subtype
assert result == np.dtype("object")
@pytest.mark.parametrize(
"string, expected",
[
("Sparse[float64]", SparseDtype(np.dtype("float64"))),
("Sparse[float32]", SparseDtype(np.dtype("float32"))),
("Sparse[int]", SparseDtype(np.dtype("int"))),
("Sparse[str]", SparseDtype(np.dtype("str"))),
("Sparse[datetime64[ns]]", SparseDtype(np.dtype("datetime64[ns]"))),
("Sparse", SparseDtype(np.dtype("float"), np.nan)),
],
)
def test_construct_from_string(string, expected):
result = SparseDtype.construct_from_string(string)
assert result == expected
@pytest.mark.parametrize(
"a, b, expected",
[
(SparseDtype(float, 0.0), SparseDtype(np.dtype("float"), 0.0), True),
(SparseDtype(int, 0), SparseDtype(int, 0), True),
(SparseDtype(float, float("nan")), SparseDtype(float, np.nan), True),
(SparseDtype(float, 0), SparseDtype(float, np.nan), False),
(SparseDtype(int, 0.0), SparseDtype(float, 0.0), False),
],
)
def test_hash_equal(a, b, expected):
result = a == b
assert result is expected
result = hash(a) == hash(b)
assert result is expected
@pytest.mark.parametrize(
"string, expected",
[
("Sparse[int]", "int"),
("Sparse[int, 0]", "int"),
("Sparse[int64]", "int64"),
("Sparse[int64, 0]", "int64"),
("Sparse[datetime64[ns], 0]", "datetime64[ns]"),
],
)
def test_parse_subtype(string, expected):
subtype, _ = SparseDtype._parse_subtype(string)
assert subtype == expected
@pytest.mark.parametrize(
"string", ["Sparse[int, 1]", "Sparse[float, 0.0]", "Sparse[bool, True]"]
)
def test_construct_from_string_fill_value_raises(string):
with pytest.raises(TypeError, match="fill_value in the string is not"):
SparseDtype.construct_from_string(string)
@pytest.mark.parametrize(
"original, dtype, expected",
[
(SparseDtype(int, 0), float, SparseDtype(float, 0.0)),
(SparseDtype(int, 1), float, SparseDtype(float, 1.0)),
(SparseDtype(int, 1), str, SparseDtype(object, "1")),
(SparseDtype(float, 1.5), int, SparseDtype(int, 1)),
],
)
def test_update_dtype(original, dtype, expected):
result = original.update_dtype(dtype)
assert result == expected
@pytest.mark.parametrize(
"original, dtype, expected_error_msg",
[
(
SparseDtype(float, np.nan),
int,
re.escape("Cannot convert non-finite values (NA or inf) to integer"),
),
(
SparseDtype(str, "abc"),
int,
re.escape("invalid literal for int() with base 10: 'abc'"),
),
],
)
def test_update_dtype_raises(original, dtype, expected_error_msg):
with pytest.raises(ValueError, match=expected_error_msg):
original.update_dtype(dtype)
def test_repr():
# GH-34352
result = str(SparseDtype("int64", fill_value=0))
expected = "Sparse[int64, 0]"
assert result == expected
result = str(SparseDtype(object, fill_value="0"))
expected = "Sparse[object, '0']"
assert result == expected
|
bsd-3-clause
|
olologin/scikit-learn
|
sklearn/datasets/tests/test_base.py
|
33
|
7160
|
import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
    # clear_data_home will delete both the content and the folder itself
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
    assert_equal(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
    # is a surprising behaviour because reading bunch.key uses
    # bunch.__dict__ (which is non-empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
|
bsd-3-clause
|
ApachePointObservatory/InstrumentBlockGUI
|
instcalc.py
|
1
|
6088
|
#!/usr/bin/python
from scipy import optimize
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from operator import itemgetter
class GridData(object):
def __init__(self, data=None, bin = None):
if data is None:
raise Exception("Data must be specified to create a GridData object.")
self.data = data
self.bin = bin
def rotationAngle(self):
"""
        Take the grid data and calculate the rotation angle based on it.
        @param data - an array of data points or a filename of data representing ....
        @return phi - rotation angle of grid points wrt astronomical north
        The original Igor program has two functions, XFit and YFit. I believe these perform N
        least-squares fits to linear components, which is why there are differing X and Y plate scales.
"""
d = np.transpose(self.data)
print d[0],d[1],d[2],d[3]
y_pos = np.unique(d[0])
x_pos = np.unique(d[1])
#sort the data
y_arr = [[],[],[],[]]
x_arr = [[],[],[],[]]
for i,y in enumerate(x_pos):
for n,m in enumerate(d[1]):
if y == m:
x_arr[i].append([d[0][n],d[1][n],d[2][n],d[3][n]])
for i,y in enumerate(y_pos):
for n,m in enumerate(d[0]):
if y == m:
print i,n,y,m
print [d[0][n],d[1][n],d[2][n],d[3][n]]
y_arr[i].append([d[0][n],d[1][n],d[2][n],d[3][n]])
ymAng=[]
ymPlate=[]
xmAng=[]
xmPlate=[]
for index,cat in enumerate(range(len(y_arr)-1)):
fit = np.transpose(y_arr[index])
print fit
#fit the ccd x,y pos for the same x boresight
m_rot = self.fitData(fit[2],fit[3])
ymAng.append(self.rotAng(m_rot))
#call in the x telescope and x ccd data to determine plate scale
m_scale= self.fitData(fit[1],fit[3])
ymPlate.append(self.plateScale(m_scale, self.bin))
for index,cat in enumerate(range(len(x_arr)-1)):
fit = np.transpose(x_arr[index])
print fit
#fit the ccd x,y pos for the same x boresight
m_rot = self.fitData(fit[3],fit[2])
xmAng.append(self.rotAng(m_rot))
#call in the x telescope and x ccd data to determine plate scale
m_scale= self.fitData(fit[0],fit[2])
xmPlate.append(self.plateScale(m_scale, self.bin))
comb_coords = []
for i in range(len(self.data[2])):
comb_coords.append([self.data[2][i],self.data[3][i]])
arranged = sorted(comb_coords,key=itemgetter(0))
line1 = arranged[0:3]
line2 = arranged[3:6]
line3 = arranged[6:9]
line_list = np.array([line1,line2,line3])
slope_list = []
yint_list = []
for item in line_list:
print item
print item.T
slope, yint = self.fitData(item.T[0],item.T[1])
slope_list.append(slope)
yint_list.append(yint)
print xmAng, xmPlate
print ymAng, ymPlate
return slope_list,yint_list
def plateScale(self, m = None, bin = None):
"""
        Calculate the plate scale based on two input observations.
        Change this so that it is not taking x and y data, but generalize
        to accept any linear x coordinates.
        Hmm, a least-squares fit of only the x data should return the plate scale as the slope. Try it.
        Use the previous fitting but return block data as well as human-readable data.
"""
scale = m * bin
#print 'plate scale: ' +str(m) + ' pix/deg (binned)'
#print 'plate scale: ' +str(scale) + ' pix/deg (unbinned)'
#print 'scale: ' +str(1/(m/3600.)) + ' arcsec/pix (binned)'
#print 'scale: ' +str(1/(scale/3600.)) + ' arcsec/pix (unbinned)'
return scale
def rotAng(self, m = None):
x = 100
y = (m*x)
phi = self.convert(y,x)
theta = ((phi*180.)/np.pi)
#print 'rotation: ' +str(theta)
return theta
def fitData(self, x_arr = None, y_arr = None):
"""
        Use a NumPy least-squares fit on the data and return the fit coefficients (slope, intercept).
        @param x_arr, y_arr - NumPy arrays of ...
"""
#print x_arr, y_arr
A = np.vstack([x_arr, np.ones(len(x_arr))]).T
#print A
m,c = np.linalg.lstsq(A,y_arr)[0]
return m,c
def convert(self, x = None, y = None):
phi = np.arctan2(x, y)
return phi
def formData(self, f_in = None):
"""
        If the data is not already in an array, convert it into one.
"""
return
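# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical check of GridData.fitData: fit a synthetic line
# y = 2*x + 1 and recover the slope and intercept. The slope is what
# plateScale() and rotAng() consume. Assumes GridData only needs a non-None
# `data` argument to be constructed for this purpose.
def _fitdata_demo():
    x = np.arange(10, dtype=float)
    y = 2.0 * x + 1.0
    gd = GridData(data=np.zeros((4, 4)))  # dummy data; only fitData is exercised here
    m, c = gd.fitData(x, y)
    print m, c  # expected: approximately 2.0 and 1.0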
class BoresightData(object):
def __init__(self, data = None):
if data is None:
raise Exception("Data must be specified to create a GridData object.")
self.data = data
def boresightPos(self):
"""
input rotational array and return the center position
"""
circleInfo = self.findCenter()
#self.graphRing(circleInfo,canvas,fig)
return circleInfo
def calcRadius(self, x,y, xc, yc):
""" calculate the distance of each 2D points from the center (xc, yc) """
radius = np.sqrt((x-xc)**2 + (y-yc)**2)
return radius
def f(self, c, x, y):
"""
calculate the distance between the data points and the center
"""
R_i = self.calcRadius(x, y, *c)
return R_i - R_i.mean()
def findCenter(self):
"""
calculate the center of the circle or arc.
modified from: http://wiki.scipy.org/Cookbook/Least_Squares_Circle
"""
x, y = self.data[0], self.data[1]
center_estimate = np.mean(x), np.mean(y)
center, ier = optimize.leastsq(self.f, center_estimate, args=(x,y))
xc, yc = center
R_i = self.calcRadius(x, y, *center)
R = R_i.mean()
res = np.sum((R_i - R)**2)
return xc, yc, R
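# --- Illustrative sketch (not part of the original module) ---
# Hypothetical usage of BoresightData: generate noisy points on a circle of
# known center and radius, then recover them with the least-squares circle
# fit referenced in findCenter() above.
def _boresight_demo():
    theta = np.linspace(0.0, 2.0 * np.pi, 50)
    x = 3.0 + 1.5 * np.cos(theta) + np.random.normal(0.0, 0.01, theta.size)
    y = -2.0 + 1.5 * np.sin(theta) + np.random.normal(0.0, 0.01, theta.size)
    bd = BoresightData(data=[x, y])
    xc, yc, R = bd.boresightPos()
    print xc, yc, R  # expected: approximately 3.0, -2.0, 1.5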
|
mit
|
RTHMaK/RPGOne
|
doc/examples/edges/plot_medial_transform.py
|
11
|
2257
|
"""
===========================
Medial axis skeletonization
===========================
The medial axis of an object is the set of all points having more than one
closest point on the object's boundary. It is often called the **topological
skeleton**, because it is a 1-pixel wide skeleton of the object, with the same
connectivity as the original object.
Here, we use the medial axis transform to compute the width of the foreground
objects. As the function ``medial_axis`` (``skimage.morphology.medial_axis``)
returns the distance transform in addition to the medial axis (with the keyword
argument ``return_distance=True``), it is possible to compute the distance to
the background for all points of the medial axis with this function. This gives
an estimate of the local width of the objects.
For a skeleton with fewer branches, there exists another skeletonization
algorithm in ``skimage``: ``skimage.morphology.skeletonize``, that computes
a skeleton by iterative morphological thinnings.
"""
import numpy as np
from scipy import ndimage as ndi
from skimage.morphology import medial_axis
import matplotlib.pyplot as plt
def microstructure(l=256):
"""
Synthetic binary data: binary microstructure with blobs.
Parameters
----------
l: int, optional
linear size of the returned image
"""
n = 5
x, y = np.ogrid[0:l, 0:l]
mask = np.zeros((l, l))
generator = np.random.RandomState(1)
points = l * generator.rand(2, n**2)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndi.gaussian_filter(mask, sigma=l/(4.*n))
return mask > mask.mean()
data = microstructure(l=64)
# Compute the medial axis (skeleton) and the distance transform
skel, distance = medial_axis(data, return_distance=True)
# Distance to the background for pixels of the skeleton
dist_on_skel = distance * skel
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax1.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
ax1.axis('off')
ax2.imshow(dist_on_skel, cmap=plt.cm.spectral, interpolation='nearest')
ax2.contour(data, [0.5], colors='w')
ax2.axis('off')
fig.tight_layout()
plt.show()
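# --- Illustrative follow-up (not part of the original example) ---
# The distance values on the skeleton approximate half the local width of each
# object, so a rough width estimate for the foreground is twice the mean
# distance over skeleton pixels. This is only a sketch of the idea described
# in the docstring above.
skeleton_distances = distance[skel]
approx_mean_width = 2 * skeleton_distances.mean()
print("Approximate mean object width: %.2f pixels" % approx_mean_width)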
|
apache-2.0
|
atamazian/traffic-proc-tools
|
gg1_parallel.py
|
1
|
5189
|
#Program simulating a queueing system in parallel
#Copyright (C) 2016 by Araik Tamazian, Viet Duc Nguyen
import pandas as pd
import numpy as np
from gg1_function import simulate_gg1, qexp_rate, rand_qexp, simulate_mm1
import matplotlib.pyplot as plt
import timeit
import multiprocessing
from joblib import Parallel, delayed
#Import external file containing empirical data (time and size)
infile = 'ses_20081013.txt'
data = pd.read_csv(infile,delim_whitespace = True, header=None, na_filter = True) #Read file without header, using whitespace to separate columns
data.columns = ['time','ip', 'numcon', 'size'] # Assign names to columns
data = data[data.time >= 0] #Drop negative time variable
time1 = np.asarray(data['time'])
time1 = np.diff(time1)
time = np.insert(time1,0,0.0)
time = time/np.mean(time)
ssize = data['size']
ssize = ssize/np.mean(ssize)
print 'len time:', len(time)
print 'len size: ', len(ssize)
#Import surrogate time
timess = pd.read_csv('sur_time_20081013_2.txt')
timess = np.asarray(timess)
timess = timess.flatten()
ssizess = pd.read_csv('sur_size_20081013_2.txt')
ssizess = np.asarray(ssizess)
ssizess = ssizess.flatten()
print 'len timess:',len(timess)
print 'len ssizess: ',len(ssizess)
#Declare variables
res_mm1 = [] #Variable that stores the results of simulating M/M/1
n = 10**6 #Number of data samples (data lines) needed simulating
c=np.logspace(np.log10(1.2), np.log10(10),10) #Values of throughput
inputs = range(len(c)) #Variable for loop in parallel
#Function for simulating queueing system with empirical data in parallel via throughput
def processInput(i):
return simulate_gg1(n,time,ssize/c[i])
#Function for simulating queueing system with surrogate data in parallel via throughput
def processInputs(i):
return simulate_gg1(n,timess,ssizess/c[i])
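#--- Illustrative sketch (not part of the original script) ---
#The joblib pattern used below in __main__ dispatches one simulate_gg1 call per
#throughput value. The same pattern with a trivial built-in function, for reference:
def _parallel_pattern_demo():
    squares = Parallel(n_jobs=2)(delayed(pow)(i, 2) for i in range(5))
    print squares  # expected: [0, 1, 4, 9, 16]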
if __name__ == '__main__':
    start_time = timeit.default_timer() #Variable for measuring the simulation time
#Simulate queueing system with empirical data, surrogate data and MM1
res_real = Parallel(n_jobs=3)(delayed(processInput)(i) for i in inputs) #With empirical data
print 'empirical data:', res_real
res_sur = Parallel(n_jobs=3)(delayed(processInputs)(i) for i in inputs) #With surrogate data
print 'Surrogate data:', res_sur
for i in range(len(c)): #Queueing system MM1
res_mm1.append(simulate_mm1(1/np.mean(time), 1/np.mean(ssize/c[i])))
    #Convert results from arrays into DataFrames using throughput as the index and save them to txt files
res_real = pd.DataFrame(data = res_real,index=c, columns=['Ur','Wr','Lr'])
res_sur = pd.DataFrame(data = res_sur,index=c, columns=['Us','Ws','Ls'])
res_mm1 = pd.DataFrame(data = res_mm1,index=c, columns=['Um','Wm','Lm'])
res_real.to_csv("simres_real.txt", sep=" ", header=False)
res_sur.to_csv("simres_sim.txt", sep=" ", header=False)
res_mm1.to_csv("simres_mm1.txt", sep=" ", header=False)
#------Plot multigraphs in one panel (subplot)-------
#Plot Utilization versus throughput
plt.subplot(1,3,1) #plt.subplot(row,column, position)
plt.plot(c,res_real['Ur'], color = 'black',linewidth = 2)
plt.plot(c,res_sur['Us'], color = 'red',linewidth = 2)
plt.plot(c,res_mm1['Um'], color = 'green',linewidth = 2)
plt.xlabel('Channel throughput, C', size = 15)
plt.ylabel('Utilization, U', size = 15)
plt.yscale('log')
plt.xscale('log')
plt.xticks(color='black', size=15)
plt.yticks(color='black', size=15)
plt.legend(loc = "upper right")
#Plot Average sojourn time in system versus throughput
plt.subplot(1,3,2)
plt.plot(c,res_real['Wr'], color = 'black',linewidth = 2)
plt.plot(c,res_sur['Ws'], color = 'red',linewidth = 2)
plt.plot(c,res_mm1['Wm'], color = 'green',linewidth = 2)
plt.xlabel('Channel throughput, C', size = 15)
    plt.ylabel('Average sojourn time, W', size = 15)
plt.yscale('log')
plt.xscale('log')
plt.xticks(color='black', size=15)
plt.yticks(color='black', size=15)
plt.legend(loc = "upper right")
#Plot Average number of requests in system versus throughput
plt.subplot(1,3,3)
plt.loglog(c,res_real['Lr'], color = 'black',linewidth = 2)
plt.loglog(c,res_sur['Ls'], color = 'red',linewidth = 2)
plt.loglog(c,res_mm1['Lm'], color = 'green',linewidth = 2)
plt.xlabel('Channel throughput, C', size = 15)
    plt.ylabel('Average number of requests in system, L', size = 15)
plt.xticks(color='black', size=15)
plt.yticks(color='black', size=15)
plt.yscale('log')
plt.xscale('log')
plt.legend(loc = "upper right")
stop_time = timeit.default_timer()
time_simulation = stop_time - start_time
print 'simulation time: ', time_simulation
plt.savefig('n206.png')
plt.show()
#---------------end-----------------------------------------------------------------------------------------------------
|
mit
|
florian-wagner/gimli
|
python/pygimli/physics/em/fdem.py
|
1
|
21421
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
What does this module do?
'''
import sys
import pygimli as pg
from pygimli.viewer import show1dmodel, drawModel1D
import numpy as np
import matplotlib.pyplot as plt
class NDMatrix(pg.RBlockMatrix):
"""
Diagonal block (block Jacobi) matrix derived from pg.BlockMatrix
(to be moved to a better place)
"""
def __init__(self, num, nrows, ncols):
super(type(self), self).__init__() # call inherited init function
self.Ji = [] # list of individual block matrices
for i in range(num):
self.Ji.append(pg.RMatrix())
self.Ji[-1].resize(nrows, ncols)
n = self.addMatrix(self.Ji[-1])
self.addMatrixEntry(n, nrows*i, ncols*i)
self.recalcMatrixSize()
print(self.rows(), self.cols())
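# --- Illustrative sketch (not part of the original module) ---
# NDMatrix places `num` blocks of shape (nrows, ncols) along the diagonal,
# i.e. block i starts at row nrows*i and column ncols*i. The dense equivalent
# of that layout with plain NumPy/SciPy (independent of pygimli) looks like this:
def _block_diagonal_demo():
    from scipy.linalg import block_diag
    blocks = [np.full((2, 3), i + 1.0) for i in range(3)]
    dense = block_diag(*blocks)
    print(dense.shape)  # (6, 9): three 2x3 blocks stacked along the diagonal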
def xfplot(ax, DATA, x, freq, everyx=5, orientation='horizontal', aspect=40):
"""
Plots a matrix according to x and frequencies
"""
nt = list(range(0, len(x), everyx))
im = ax.imshow(DATA.T, interpolation='nearest')
ax.set_ylim(plt.ylim()[::-1])
ax.set_xticks(nt)
ax.set_xticklabels(["%g" % xi for xi in x[nt]])
ax.set_yticks(list(range(0, len(freq)+1, 2)))
ax.set_yticklabels(["%g" % freq[i] for i in range(0, len(freq), 2)])
ax.set_xlabel('x [m]')
ax.set_ylabel('f [Hz]')
plt.colorbar(im, ax=ax, orientation=orientation, aspect=aspect)
return im
class FDEM2dFOPold(pg.ModellingBase):
"""
"""
def __init__(self, data, nlay=2, verbose=False):
"""
"""
pg.ModellingBase.__init__(self, verbose)
self.nlay = nlay
self.FOP1d = data.FOP(nlay)
self.nx = len(data.x)
self.nf = len(data.freq())
self.mesh_ = pg.createMesh1D(self.nx, 2*nlay-1)
self.setMesh(self.mesh_)
def response(self, model):
"""
"""
modA = np.asarray(model).reshape((self.nlay*2-1, self.nx)).T
resp = pg.RVector(0)
for modi in modA:
resp = pg.cat(resp, self.FOP1d.response(modi))
return resp
class FDEM2dFOP(pg.ModellingBase):
"""
FDEM 2d-LCI modelling class based on BlockMatrices
"""
def __init__(self, data, nlay=2, verbose=False):
"""
"""
super(FDEM2dFOP, self).__init__(verbose)
self.nlay = nlay
self.FOP = data.FOP(nlay)
self.nx = len(data.x)
self.nf = len(data.freq())
npar = 2 * nlay - 1
self.mesh1d = pg.createMesh1D(self.nx, npar)
self.mesh_ = pg.createMesh1D(self.nx, 2*nlay-1)
self.setMesh(self.mesh_)
# self.J = NDMatrix(self.nx, self.nf*2, npar)
self.J = pg.RBlockMatrix()
self.FOP1d = []
for i in range(self.nx):
self.FOP1d.append(pg.FDEM1dModelling(nlay, data.freq(),
data.coilSpacing, -data.height))
n = self.J.addMatrix(self.FOP1d[-1].jacobian())
self.J.addMatrixEntry(n, self.nf*2*i, npar*i)
self.J.recalcMatrixSize()
print(self.J.rows(), self.J.cols())
def response(self, model):
"""
"""
modA = np.asarray(model).reshape((self.nlay*2-1, self.nx)).T
resp = pg.RVector(0)
for modi in modA:
resp = pg.cat(resp, self.FOP.response(modi))
return resp
def createJacobian(self, model):
modA = np.asarray(model).reshape((self.nlay*2-1, self.nx)).T
for i in range(self.nx):
self.FOP1d[i].createJacobian(modA[i])
class FDEMData():
"""
Class for managing Frequency Domain EM data and their inversions
"""
def __init__(self, x=None, freqs=None,
coilSpacing=None, inphase=None, outphase=None,
filename=None, scaleFreeAir=False):
"""
Initialize data class and load data. Provide filename or data.
If filename is given, data is loaded, overwriting settings.
Parameters
----------
x: array
Array of measurement positions
freq: array
Measured frequencies
coilSpacing : float
            Distance between the two coils
inphase : array
            real part of |amplitude| * exp(i * phase)
        outphase : array
            imaginary part of |amplitude| * exp(i * phase)
filename : str
Filename to read from. Supported: .xyz (MaxMin), *.txt (Emsys)
scaleFreeAir : bool
Scale inphase and outphase data by free air solution (primary field)
"""
if isinstance(x, str) and freqs is None: # string/file init
filename = x
self.x = x
self.frequencies = freqs
self.coilSpacing = coilSpacing
self.IP = inphase
self.OP = outphase
self.ERR = None
self.height = 1.0
if filename:
# check if filename extension is TXT or CSV
fl = filename.lower()
if fl.rfind('.txt') > 0 or fl.rfind('.csv') > 0:
self.importEmsysAsciiData(filename)
else:
self.importMaxminData(filename)
self.isActiveFreq = self.frequencies > 0.0
self.activeFreq = np.nonzero(self.isActiveFreq)[0]
if scaleFreeAir:
freeAirSolution = self.FOP().freeAirSolution()
self.IP /= freeAirSolution
self.OP /= freeAirSolution
def __repr__(self):
if self.x is None:
return "<FDEMdata: sounding with %d frequencies, " \
"coil spacing is %.1f>" % \
(len(self.frequencies), self.coilSpacing)
else:
return "<FDEMdata: %d soundings with each %d frequencies, " \
"coil spacing is %.1f>" % \
(len(self.x), len(self.frequencies), self.coilSpacing)
def importEmsysAsciiData(self, filename, verbose=False):
"""
Import data from emsys text export:
yields: positions, data, frequencies, error and geometry
"""
xx, sep, f, pf, ip, op, hmod, q = np.loadtxt(filename, skiprows=1, usecols=(1,4,6,8,9,12,15,16), unpack=True)
err = q / pf * 100. # percentage of primary field
if len(np.unique(sep)) > 1:
print("Warning! Several coil spacings present in file!")
self.coilSpacing = np.median(sep)
f = np.round_(f)
self.frequencies, mf, nf = np.unique(f, True, True)
x, mx, nx = np.unique(xx, True, True)
self.IP = np.ones((len(x), len(f))) * np.nan
self.OP = np.ones((len(x), len(f))) * np.nan
self.ERR = np.ones((len(x), len(f))) * np.nan
for i in range(len(f)):
# print(i, nx[i], nf[i])
self.IP[nx[i], nf[i]] = ip[i]
self.OP[nx[i], nf[i]] = op[i]
self.ERR[nx[i], nf[i]] = err[i]
def importMaxminData(self, filename, verbose=False):
"""
Import MaxMin IPX format with pos, data, frequencies and geometry.
"""
delim = None
fid = open(filename)
for i, aline in enumerate(fid):
if aline.split()[0][0].isdigit(): # number found
break
elif aline.find('COIL') > 0: # [:6] == '/ COIL':
self.coilSpacing = float(aline.replace(':', ': ').split()[-2])
elif aline.find('FREQ') > 0: # [:6] == '/ FREQ':
mya = aline[aline.find(':')+1:].replace(',', ' ').split()
myf = [float(aa) for aa in mya if aa[0].isdigit()]
self.frequencies = np.array(myf)
fid.close()
if verbose:
print("CS=", self.coilSpacing, "F=", self.frequencies)
if aline.find(',') > 0:
delim = ','
nf = len(self.frequencies)
if verbose:
print("delim=", delim, "nf=", nf)
A = np.loadtxt(filename, skiprows=i, delimiter=delim).T
x, y, self.IP, self.OP = A[0], A[1], A[2:nf*2+2:2].T, A[3:nf*2+2:2].T
if max(x) == min(x):
self.x = y
else:
self.x = x
def deactivate(self, fr):
"""
Deactivate a single frequency
"""
fi = np.nonzero(np.absolute(self.frequencies/fr - 1.) < 0.1)
self.isActiveFreq[fi] = False
self.activeFreq = np.nonzero(self.isActiveFreq)[0]
def freq(self):
"""
        Return active (i.e., nonzero) frequencies.
"""
return self.frequencies[self.activeFreq]
def FOP(self, nlay=2):
"""
Retrieve forward modelling operator using a block discretization
Parameters
----------
nlay : int
Number of blocks
"""
return pg.FDEM1dModelling(nlay, self.freq(), self.coilSpacing,
-self.height)
def FOPsmooth(self, zvec):
"""
Retrieve forward modelling operator using fixed layers
(smooth inversion)
Parameters
----------
zvec : array
???
"""
return pg.FDEM1dRhoModelling(zvec, self.freq(), self.coilSpacing,
-self.height)
def selectData(self, xpos=0):
"""
        Retrieve inphase, outphase and error (if it exists) vectors from an index
        or near a given position.
Return: array, array, array|None
"""
# check for index
if isinstance(xpos, int) and (xpos < len(self.x)) and (xpos >= 0):
n = xpos
else:
n = np.argmin(np.absolute(self.x - xpos))
if self.ERR is not None:
return (self.IP[n, self.activeFreq], self.OP[n, self.activeFreq],
self.ERR[n, self.activeFreq])
else:
return (self.IP[n, self.activeFreq], self.OP[n, self.activeFreq],
None)
def error(self, xpos=0):
"""
Return error vector
"""
ip, op, err = self.selectData(xpos)
return err
def datavec(self, xpos=0):
"""
        Extract the data vector (stacked in-phase and out-of-phase) for a given position or sounding number.
"""
ip, op, err = self.selectData(xpos)
return pg.asvector(np.hstack((ip, op)))
def errorvec(self, xpos=0, minvalue=0.0):
"""
        Extract error vector for a given position or sounding number
"""
ip, op, err = self.selectData(xpos)
return pg.asvector(np.tile(np.maximum(err * 0.7071, minvalue), 2))
def invBlock(self, xpos=0, nlay=2, noise=1.0,
stmod=30., lam=100., lBound=1., uBound=0., verbose=False):
"""
Yield gimli inversion instance for block inversion
inv(xpos,nlay) where nlay can be a FOP or a number of layers
Parameters
----------
xpos : array
nLay : int
Number of layers of the model to be determined
noise : float
            Absolute data error in percent
        stmod : float
            Constant starting model
lam : float
Global regularization parameter lambda.
lBound : float
Lower boundary for the model
uBound : float
            Upper boundary for the model. 0 means no upper boundary
verbose : bool
Be verbose
"""
self.transThk = pg.RTransLog()
self.transRes = pg.RTransLogLU(lBound, uBound)
self.transData = pg.RTrans()
# EM forward operator
if isinstance(nlay, pg.FDEM1dModelling):
self.fop = nlay
else:
self.fop = self.FOP(nlay)
data = self.datavec(xpos)
self.fop.region(0).setTransModel(self.transThk)
self.fop.region(1).setTransModel(self.transRes)
if isinstance(noise, float):
noiseVec = pg.RVector(len(data), noise)
else:
noiseVec = pg.asvector(noise)
# independent EM inversion
self.inv = pg.RInversion(data, self.fop, self.transData, verbose)
if isinstance(stmod, float): # real model given
model = pg.RVector(nlay * 2 - 1, stmod)
model[0] = 2.
else:
if len(stmod) == nlay*2-1:
model = pg.asvector(stmod)
else:
model = pg.RVector(nlay*2-1, 30.)
self.inv.setAbsoluteError(noiseVec)
self.inv.setLambda(lam)
self.inv.setMarquardtScheme(0.8)
self.inv.setDeltaPhiAbortPercent(0.5)
self.inv.setModel(model)
self.inv.setReferenceModel(model)
return self.inv
def plotData(self, xpos=0, response=None, error=None, ax=None,
marker='bo-', rmarker='rx-', clf=True, addlabel='', nv=2):
"""
Plot data as curves at given position
"""
ip, op, err = self.selectData(xpos)
if error is not None and err is not None:
error = err
fr = self.freq()
if ax is None:
fig, ax = plt.subplots(nrows=1, ncols=nv)
ipax = ax[-1]
else:
ipax = ax[0]
markersize = 4
if error is not None:
markersize = 2
ipax.semilogy(ip, fr, marker, label='obs'+addlabel,
markersize=markersize)
if error is not None and len(error) == len(ip):
ipax.errorbar(ip, fr, xerr=error)
# ipax.set_axis('tight')
if error is not None:
            ipax.set_ylim((min(fr) * .98, max(fr) * 1.02))
ipax.grid(True)
ipax.set_xlabel('inphase [%]')
ipax.set_ylabel('f [Hz]')
if response is not None:
rip = np.asarray(response)[:len(ip)]
ipax.semilogy(rip, fr, rmarker, label='syn' + addlabel)
ipax.legend(loc='best')
opax = None
if ax is None:
opax = plt.subplot(1, nv, nv)
else:
opax = ax[1]
opax.semilogy(op, fr, marker, label='obs'+addlabel,
markersize=markersize)
if error is not None and len(error) == len(ip):
opax.errorbar(op, fr, xerr=error)
if response is not None:
rop = np.asarray(response)[len(ip):]
opax.semilogy(rop, fr, rmarker, label='syn'+addlabel)
#opax.set_axis('tight')
if error is not None:
            opax.set_ylim((min(fr) * .98, max(fr) * 1.02))
opax.grid(True)
opax.set_xlabel('outphase [%]')
opax.set_ylabel('f [Hz]')
opax.legend(loc='best')
# plt.subplot(1, nv, 1)
return
def plotDataOld(self, xpos=0, response=None,
marker='bo-', rmarker='rx-', clf=True):
"""
plot data as curves at given position
"""
        ip, op, err = self.selectData(xpos)
fr = self.freq()
if clf: plt.clf()
plt.subplot(121)
plt.semilogy(ip, fr, marker, label='obs')
plt.axis('tight')
plt.grid(True)
plt.xlabel('inphase [%]')
plt.ylabel('f [Hz]')
if response is not None:
rip = np.asarray(response)[:len(ip)]
plt.semilogy(rip, fr, rmarker, label='syn')
plt.legend(loc='best')
plt.subplot(122)
plt.semilogy(op, fr, marker, label='obs')
if response is not None:
rop = np.asarray(response)[len(ip):]
plt.semilogy(rop, fr, rmarker, label='syn')
plt.axis('tight')
plt.grid(True)
plt.xlabel('outphase [%]')
plt.ylabel('f [Hz]')
plt.legend(loc='best')
plt.show()
return
def showModelAndData(self, model, xpos=0, response=None, figsize=(8, 6)):
"""
"""
fig, ax = plt.subplots(1, 3, figsize=figsize)
model = np.asarray(model)
        nlay = (len(model) + 1) // 2
thk = model[:nlay-1]
res = model[nlay-1: 2*nlay-1]
drawModel1D(ax[0], thk, res, plotfunction='semilogx')
self.plotData(xpos, response, ax=ax[1:3], clf=False)
return fig, ax
def plotAllData(self, allF=True, orientation='horizontal',
outname=None, show=False, figsize=(11, 6)):
"""
Plot data along a profile as image plots for IP and OP
"""
if self.x is None:
raise Exception("No measurement position array x given")
freq = self.freq()
        nplots = 2  # number of panels; avoid shadowing the numpy alias `np`
        if self.ERR is not None:
            nplots = 3
        fig, ax = plt.subplots(ncols=nplots, nrows=1, figsize=figsize)
xfplot(ax[0], self.IP[:, self.activeFreq], self.x, freq,
orientation=orientation)
ax[0].set_title('inphase percent')
xfplot(ax[1], self.OP[:, self.activeFreq], self.x, freq,
orientation=orientation)
ax[1].set_title('outphase percent')
if self.ERR is not None:
xfplot(ax[2], self.ERR[:, self.activeFreq], self.x, freq,
orientation=orientation)
ax[2].set_title('error percent')
if outname is not None:
plt.savefig(outname)
if show:
plt.show()
return
def plotModelAndData(self, model, xpos, response,
modelL=None, modelU=None):
self.plotData(xpos, response, nv=3)
show1dmodel(model, color='blue')
if modelL is not None and modelU is not None:
pass
# draw1dmodelErr(model, modelL, modelU) # !!!!
return
def FOP2d(self, nlay):
""" 2d forward modelling operator """
return FDEM2dFOP(self, nlay)
def inv2D(self, nlay, lam=100., resL=1., resU=1000., thkL=1.,
thkU=100., minErr=1.0):
"""
2d LCI inversion class
"""
if isinstance(nlay, int):
modVec = pg.RVector(nlay * 2 - 1, 30.)
cType = 0 # no reference model
else:
modVec = nlay
            cType = 10  # use this as the reference model
            nlay = (len(modVec) + 1) // 2
# init forward operator
self.f2d = self.FOP2d(nlay)
# transformations
self.tD = pg.RTrans()
self.tThk = pg.RTransLogLU(thkL, thkU)
self.tRes = pg.RTransLogLU(resL, resU)
for i in range(nlay-1):
self.f2d.region(i).setTransModel(self.tThk)
for i in range(nlay-1, nlay*2-1):
self.f2d.region(i).setTransModel(self.tRes)
# set constraints
self.f2d.region(0).setConstraintType(cType)
self.f2d.region(1).setConstraintType(cType)
# collect data vector
datvec = pg.RVector(0)
for i in range(len(self.x)):
datvec = pg.cat(datvec, self.datavec(i))
# collect error vector
if self.ERR is None:
error = 1.0
else:
error = []
for i in range(len(self.x)):
err = np.maximum(self.ERR[i][self.activeFreq] * 0.701, minErr)
error.extend(err)
# generate starting model by repetition
model = pg.asvector(np.repeat(modVec, len(self.x)))
INV = pg.RInversion(datvec, self.f2d, self.tD)
INV.setAbsoluteError(error)
INV.setLambda(lam)
INV.setModel(model)
INV.setReferenceModel(model)
return INV
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser("usage: %prog [options] fdem",
version="%prog: " + pg.__version__)
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
help="be verbose", default=False)
parser.add_option("-n", "--nLayers", dest="nlay", help="number of layers",
type="int", default="4")
parser.add_option("-x", "--xPos", dest="xpos", help="position/no to use",
type="float", default="0")
parser.add_option("-l", "--lambda", dest="lam", help="init regularization",
type="float", default="30")
parser.add_option("-e", "--error", dest="err", help="error estimate",
type="float", default="1")
(options, args) = parser.parse_args()
if options.verbose:
__verbose__ = True
A = NDMatrix(13, 6, 5)
fdem = FDEMData('example.xyz')
print(fdem)
# fop2d = FDEM2dFOP(fdem, nlay=3)
# raise SystemExit
if len(args) == 0:
parser.print_help()
print("Please add a mesh or model name.")
sys.exit(2)
else:
datafile = args[0]
nlay = options.nlay
xpos = options.xpos
        name = datafile.lower().replace('.xyz', '')  # strip the extension (rstrip would drop any trailing x/y/z/. characters)
fdem = FDEMData(datafile)
print(fdem)
fdem.deactivate(56320.) # do not use highest frequency
fdem.plotAllData(outname=name+'-alldata.pdf')
INV = fdem.invBlock(xpos=xpos, lam=options.lam, nlay=options.nlay,
noise=options.err, verbose=False)
model = np.asarray(INV.run())
INV.echoStatus()
print("thk = ", model[:nlay-1])
print("res = ", model[nlay-1:])
fig, ax = fdem.showModelAndData(model, xpos, INV.response())
INV = fdem.inv2D(options.nlay)
INV.run()
# fig.savefig(name+str(xpos)+'-result.pdf', bbox_inches='tight')
plt.show()
|
gpl-3.0
|
hainm/scikit-learn
|
examples/text/hashing_vs_dict_vectorizer.py
|
284
|
3265
|
"""
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
|
bsd-3-clause
|
zihua/scikit-learn
|
sklearn/grid_search.py
|
6
|
38777
|
"""
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. This module will be removed in 0.20.",
DeprecationWarning)
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
        Additional parameters passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
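# --- Illustrative sketch (not part of the original module) ---
# Hypothetical usage of fit_grid_point on a toy dataset: evaluate a single
# parameter setting on one hand-made train/test split. This helper is never
# called by the module itself.
def _fit_grid_point_demo():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    X, y = iris.data, iris.target
    # random 100/50 split of the 150 iris samples
    rng = check_random_state(0)
    perm = rng.permutation(len(y))
    train, test = perm[:100], perm[100:]
    scorer = check_scoring(SVC(), scoring='accuracy')
    score, params, n_test = fit_grid_point(X, y, SVC(), {'C': 1.0},
                                           train, test, scorer, verbose=0)
    return score, params, n_test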
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
    # By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance. We avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
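# --- Illustrative sketch (not part of the original module) ---
# The empty __slots__ above keeps _CVScoreTuple instances as lean as the
# underlying namedtuple: they carry no per-instance __dict__. A hypothetical
# check of that property (never called by the module itself):
def _cvscoretuple_slots_demo():
    t = _CVScoreTuple({'C': 1.0}, 0.9, np.array([0.8, 1.0]))
    return hasattr(t, '__dict__')  # False: attributes live in the tuple itself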
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of quadruplets: (score, n_test_samples, time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
.. versionchanged:: 0.17
Upgraded to joblib 0.9.3.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
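# Illustrative sketch, not part of the original module: how a param_grid given
# as a list of dicts is expanded by ParameterGrid into individual candidate
# settings. The grids below are assumptions made up for this example.
def _example_parameter_grid_expansion():
    param_grid = [{'kernel': ['linear'], 'C': [1, 10]},
                  {'kernel': ['rbf'], 'C': [1, 10], 'gamma': [0.1, 1.0]}]
    # 2 candidates from the first dict + 4 from the second = 6 in total
    return list(ParameterGrid(param_grid))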
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
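# Illustrative sketch, not part of the original module: drawing parameter
# settings from a continuous scipy distribution with RandomizedSearchCV. The
# estimator, dataset and distributions are assumptions chosen for the example.
def _example_randomized_search():
    from scipy.stats import expon
    from sklearn import datasets
    from sklearn.svm import SVC
    iris = datasets.load_iris()
    param_distributions = {'C': expon(scale=10), 'kernel': ['linear', 'rbf']}
    search = RandomizedSearchCV(SVC(), param_distributions, n_iter=5,
                                random_state=0)
    search.fit(iris.data, iris.target)
    return search.best_params_, search.best_score_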
|
bsd-3-clause
|
bigdataelephants/scikit-learn
|
sklearn/feature_extraction/hashing.py
|
24
|
5668
|
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
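# Illustrative sketch, not part of the original module: hashing two small
# token-count dicts into a fixed-width sparse matrix. The sample data and the
# tiny n_features value are assumptions made up for this example.
def _example_feature_hasher():
    hasher = FeatureHasher(n_features=8, input_type="dict")
    raw_X = [{"dog": 1, "cat": 2}, {"dog": 2, "run": 5}]
    X = hasher.transform(raw_X)
    # dense view, shape (2, 8); signs may be flipped unless non_negative=True
    return X.toarray()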
|
bsd-3-clause
|
wazeerzulfikar/scikit-learn
|
sklearn/mixture/tests/test_bayesian_mixture.py
|
84
|
17929
|
# Author: Wei Xue <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.special import gammaln
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_almost_equal
from sklearn.mixture.bayesian_mixture import _log_dirichlet_norm
from sklearn.mixture.bayesian_mixture import _log_wishart_norm
from sklearn.mixture import BayesianGaussianMixture
from sklearn.mixture.tests.test_gaussian_mixture import RandomData
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_greater_equal, ignore_warnings
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
PRIOR_TYPE = ['dirichlet_process', 'dirichlet_distribution']
def test_log_dirichlet_norm():
rng = np.random.RandomState(0)
weight_concentration = rng.rand(2)
expected_norm = (gammaln(np.sum(weight_concentration)) -
np.sum(gammaln(weight_concentration)))
    predicted_norm = _log_dirichlet_norm(weight_concentration)
    assert_almost_equal(expected_norm, predicted_norm)
def test_log_wishart_norm():
rng = np.random.RandomState(0)
n_components, n_features = 5, 2
degrees_of_freedom = np.abs(rng.rand(n_components)) + 1.
log_det_precisions_chol = n_features * np.log(range(2, 2 + n_components))
expected_norm = np.empty(5)
for k, (degrees_of_freedom_k, log_det_k) in enumerate(
zip(degrees_of_freedom, log_det_precisions_chol)):
expected_norm[k] = -(
degrees_of_freedom_k * (log_det_k + .5 * n_features * np.log(2.)) +
np.sum(gammaln(.5 * (degrees_of_freedom_k -
np.arange(0, n_features)[:, np.newaxis])), 0))
    predicted_norm = _log_wishart_norm(degrees_of_freedom,
                                       log_det_precisions_chol, n_features)
    assert_almost_equal(expected_norm, predicted_norm)
def test_bayesian_mixture_covariance_type():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
covariance_type = 'bad_covariance_type'
bgmm = BayesianGaussianMixture(covariance_type=covariance_type,
random_state=rng)
assert_raise_message(ValueError,
"Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% covariance_type, bgmm.fit, X)
def test_bayesian_mixture_weight_concentration_prior_type():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
bad_prior_type = 'bad_prior_type'
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=bad_prior_type, random_state=rng)
assert_raise_message(ValueError,
"Invalid value for 'weight_concentration_prior_type':"
" %s 'weight_concentration_prior_type' should be in "
"['dirichlet_process', 'dirichlet_distribution']"
% bad_prior_type, bgmm.fit, X)
def test_bayesian_mixture_weights_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 5, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of weight_concentration_prior
bad_weight_concentration_prior_ = 0.
bgmm = BayesianGaussianMixture(
weight_concentration_prior=bad_weight_concentration_prior_,
random_state=0)
assert_raise_message(ValueError,
"The parameter 'weight_concentration_prior' "
"should be greater than 0., but got %.3f."
% bad_weight_concentration_prior_,
bgmm.fit, X)
# Check correct init for a given value of weight_concentration_prior
weight_concentration_prior = rng.rand()
bgmm = BayesianGaussianMixture(
weight_concentration_prior=weight_concentration_prior,
random_state=rng).fit(X)
assert_almost_equal(weight_concentration_prior,
bgmm.weight_concentration_prior_)
# Check correct init for the default value of weight_concentration_prior
bgmm = BayesianGaussianMixture(n_components=n_components,
random_state=rng).fit(X)
assert_almost_equal(1. / n_components, bgmm.weight_concentration_prior_)
def test_bayesian_mixture_means_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 3, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of mean_precision_prior
bad_mean_precision_prior_ = 0.
bgmm = BayesianGaussianMixture(
mean_precision_prior=bad_mean_precision_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'mean_precision_prior' should be "
"greater than 0., but got %.3f."
% bad_mean_precision_prior_,
bgmm.fit, X)
# Check correct init for a given value of mean_precision_prior
mean_precision_prior = rng.rand()
bgmm = BayesianGaussianMixture(
mean_precision_prior=mean_precision_prior,
random_state=rng).fit(X)
assert_almost_equal(mean_precision_prior, bgmm.mean_precision_prior_)
# Check correct init for the default value of mean_precision_prior
bgmm = BayesianGaussianMixture(random_state=rng).fit(X)
assert_almost_equal(1., bgmm.mean_precision_prior_)
# Check raise message for a bad shape of mean_prior
mean_prior = rng.rand(n_features + 1)
bgmm = BayesianGaussianMixture(n_components=n_components,
mean_prior=mean_prior,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'means' should have the shape of ",
bgmm.fit, X)
# Check correct init for a given value of mean_prior
mean_prior = rng.rand(n_features)
bgmm = BayesianGaussianMixture(n_components=n_components,
mean_prior=mean_prior,
random_state=rng).fit(X)
assert_almost_equal(mean_prior, bgmm.mean_prior_)
    # Check correct init for the default value of mean_prior
bgmm = BayesianGaussianMixture(n_components=n_components,
random_state=rng).fit(X)
assert_almost_equal(X.mean(axis=0), bgmm.mean_prior_)
def test_bayesian_mixture_precisions_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of degrees_of_freedom_prior
bad_degrees_of_freedom_prior_ = n_features - 1.
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=bad_degrees_of_freedom_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'degrees_of_freedom_prior' should be "
"greater than %d, but got %.3f."
% (n_features - 1, bad_degrees_of_freedom_prior_),
bgmm.fit, X)
# Check correct init for a given value of degrees_of_freedom_prior
degrees_of_freedom_prior = rng.rand() + n_features - 1.
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=degrees_of_freedom_prior,
random_state=rng).fit(X)
assert_almost_equal(degrees_of_freedom_prior,
bgmm.degrees_of_freedom_prior_)
# Check correct init for the default value of degrees_of_freedom_prior
degrees_of_freedom_prior_default = n_features
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=degrees_of_freedom_prior_default,
random_state=rng).fit(X)
assert_almost_equal(degrees_of_freedom_prior_default,
bgmm.degrees_of_freedom_prior_)
# Check correct init for a given value of covariance_prior
covariance_prior = {
'full': np.cov(X.T, bias=1) + 10,
'tied': np.cov(X.T, bias=1) + 5,
'diag': np.diag(np.atleast_2d(np.cov(X.T, bias=1))) + 3,
'spherical': rng.rand()}
bgmm = BayesianGaussianMixture(random_state=rng)
for cov_type in ['full', 'tied', 'diag', 'spherical']:
bgmm.covariance_type = cov_type
bgmm.covariance_prior = covariance_prior[cov_type]
bgmm.fit(X)
assert_almost_equal(covariance_prior[cov_type],
bgmm.covariance_prior_)
# Check raise message for a bad spherical value of covariance_prior
bad_covariance_prior_ = -1.
bgmm = BayesianGaussianMixture(covariance_type='spherical',
covariance_prior=bad_covariance_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'spherical covariance_prior' "
"should be greater than 0., but got %.3f."
% bad_covariance_prior_,
bgmm.fit, X)
# Check correct init for the default value of covariance_prior
covariance_prior_default = {
'full': np.atleast_2d(np.cov(X.T)),
'tied': np.atleast_2d(np.cov(X.T)),
'diag': np.var(X, axis=0, ddof=1),
'spherical': np.var(X, axis=0, ddof=1).mean()}
bgmm = BayesianGaussianMixture(random_state=0)
for cov_type in ['full', 'tied', 'diag', 'spherical']:
bgmm.covariance_type = cov_type
bgmm.fit(X)
assert_almost_equal(covariance_prior_default[cov_type],
bgmm.covariance_prior_)
def test_bayesian_mixture_check_is_fitted():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
# Check raise message
bgmm = BayesianGaussianMixture(random_state=rng)
X = rng.rand(n_samples, n_features)
assert_raise_message(ValueError,
'This BayesianGaussianMixture instance is not '
'fitted yet.', bgmm.score, X)
def test_bayesian_mixture_weights():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
# Case Dirichlet distribution for the weight concentration prior type
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_distribution",
n_components=3, random_state=rng).fit(X)
expected_weights = (bgmm.weight_concentration_ /
np.sum(bgmm.weight_concentration_))
assert_almost_equal(expected_weights, bgmm.weights_)
assert_almost_equal(np.sum(bgmm.weights_), 1.0)
# Case Dirichlet process for the weight concentration prior type
dpgmm = BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_process",
n_components=3, random_state=rng).fit(X)
weight_dirichlet_sum = (dpgmm.weight_concentration_[0] +
dpgmm.weight_concentration_[1])
tmp = dpgmm.weight_concentration_[1] / weight_dirichlet_sum
expected_weights = (dpgmm.weight_concentration_[0] / weight_dirichlet_sum *
np.hstack((1, np.cumprod(tmp[:-1]))))
expected_weights /= np.sum(expected_weights)
assert_almost_equal(expected_weights, dpgmm.weights_)
assert_almost_equal(np.sum(dpgmm.weights_), 1.0)
@ignore_warnings(category=ConvergenceWarning)
def test_monotonic_likelihood():
    # We check that each step of variational inference without regularization
    # monotonically improves the lower bound on the training set.
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=20)
n_components = rand_data.n_components
for prior_type in PRIOR_TYPE:
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type=covar_type,
warm_start=True, max_iter=1, random_state=rng, tol=1e-4)
current_lower_bound = -np.infty
# Do one training iteration at a time so we can make sure that the
# training log likelihood increases after each iteration.
for _ in range(600):
prev_lower_bound = current_lower_bound
current_lower_bound = bgmm.fit(X).lower_bound_
assert_greater_equal(current_lower_bound, prev_lower_bound)
if bgmm.converged_:
break
assert(bgmm.converged_)
def test_compare_covar_type():
# We can compare the 'full' precision with the other cov_type if we apply
# 1 iter of the M-step (done during _initialize_parameters).
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
X = rand_data.X['full']
n_components = rand_data.n_components
for prior_type in PRIOR_TYPE:
# Computation of the full_covariance
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='full',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
full_covariances = (
bgmm.covariances_ *
bgmm.degrees_of_freedom_[:, np.newaxis, np.newaxis])
# Check tied_covariance = mean(full_covariances, 0)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='tied',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
tied_covariance = bgmm.covariances_ * bgmm.degrees_of_freedom_
assert_almost_equal(tied_covariance, np.mean(full_covariances, 0))
# Check diag_covariance = diag(full_covariances)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='diag',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
diag_covariances = (bgmm.covariances_ *
bgmm.degrees_of_freedom_[:, np.newaxis])
assert_almost_equal(diag_covariances,
np.array([np.diag(cov)
for cov in full_covariances]))
# Check spherical_covariance = np.mean(diag_covariances, 0)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='spherical',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
spherical_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_
assert_almost_equal(
spherical_covariances, np.mean(diag_covariances, 1))
@ignore_warnings(category=ConvergenceWarning)
def test_check_covariance_precision():
# We check that the dot product of the covariance and the precision
# matrices is identity.
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components, n_features = 2 * rand_data.n_components, 2
# Computation of the full_covariance
bgmm = BayesianGaussianMixture(n_components=n_components,
max_iter=100, random_state=rng, tol=1e-3,
reg_covar=0)
for covar_type in COVARIANCE_TYPE:
bgmm.covariance_type = covar_type
bgmm.fit(rand_data.X[covar_type])
if covar_type == 'full':
for covar, precision in zip(bgmm.covariances_, bgmm.precisions_):
assert_almost_equal(np.dot(covar, precision),
np.eye(n_features))
elif covar_type == 'tied':
assert_almost_equal(np.dot(bgmm.covariances_, bgmm.precisions_),
np.eye(n_features))
elif covar_type == 'diag':
assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
np.ones((n_components, n_features)))
else:
assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
np.ones(n_components))
@ignore_warnings(category=ConvergenceWarning)
def test_invariant_translation():
    # We check here that adding a constant to the data correctly shifts the
    # parameters of the mixture.
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=100)
n_components = 2 * rand_data.n_components
for prior_type in PRIOR_TYPE:
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
bgmm1 = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=n_components, max_iter=100, random_state=0,
tol=1e-3, reg_covar=0).fit(X)
bgmm2 = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=n_components, max_iter=100, random_state=0,
tol=1e-3, reg_covar=0).fit(X + 100)
assert_almost_equal(bgmm1.means_, bgmm2.means_ - 100)
assert_almost_equal(bgmm1.weights_, bgmm2.weights_)
assert_almost_equal(bgmm1.covariances_, bgmm2.covariances_)
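# Illustrative sketch, not part of the original test module: the expected
# weights for the 'dirichlet_process' prior in test_bayesian_mixture_weights
# come from a stick-breaking construction over the two rows of
# weight_concentration_. The concentration values below are made up.
def _example_stick_breaking_weights():
    a = np.array([2.0, 1.5, 1.0])  # first Beta parameters of each stick
    b = np.array([1.0, 2.0, 3.0])  # second Beta parameters of each stick
    tmp = b / (a + b)
    weights = a / (a + b) * np.hstack((1, np.cumprod(tmp[:-1])))
    weights /= np.sum(weights)
    return weights  # sums to 1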
|
bsd-3-clause
|
sebastien-forestier/explaupoppydiva
|
scripts/arm_diva/test_analysis.py
|
1
|
7661
|
import cPickle
import matplotlib.pyplot as plt
from numpy import array, mean, std, sqrt
import os
import sys
from explaupoppydiva.drawer import Drawer
logs = os.getenv('HOME') + '/scm/Flowers/explaupoppydiva/logs/'
log_dirs = [
'2015-05-27_11-51-43-ARM-DIVA',
#'2015-05-26_20-59-31-ARM-DIVA',
]
conditions_to_plot = [
'Arm-Diva-Tr-H-seq_3000',
#'Arm-Diva-GB-H-seq_3000',
#'Arm-Diva-GB-F-seq_3000',
'Arm-Diva-MB-H-seq_3000',
'Arm-Diva-Tr-F-seq_3000',
'Arm-Diva-MB-F-seq_3000',
]
plot_explo = True
plot_explo_comp = False
plot_comp = False
iterations = 3000
eval_at = range(1, iterations +1, iterations/10)
for log_dir in log_dirs:
if plot_explo:
fig_explo, ax_explo = plt.subplots()
fig_explo.canvas.set_window_title('Exploration comparison for ' + log_dir)
if plot_explo_comp:
fig_explo_comp, ax_explo_comp = plt.subplots()
fig_explo_comp.canvas.set_window_title('ExploComp comparison for ' + log_dir)
if plot_comp:
fig_comp, ax_comp = plt.subplots()
fig_comp.canvas.set_window_title('Comp comparison for ' + log_dir)
for xp_dir in sorted(os.listdir(os.path.join(logs,log_dir))) + ['']:
print xp_dir
if xp_dir in conditions_to_plot and os.path.isdir(os.path.join(logs, log_dir, xp_dir)):
explo = {}
explo_comp = {}
comp = {}
print os.path.join(logs, log_dir, xp_dir)
for log_file in os.listdir(os.path.join(logs, log_dir, xp_dir)):
file_path = os.path.join(logs, log_dir, xp_dir, log_file)
if file_path.endswith('.pickle'):
print file_path
try:
with open(file_path, 'r') as f:
log = cPickle.load(f)
f.close()
print "explo", log.explo, "explocomp"#, log.explo_comp, "comp", mean(array(log.eval_errors[0]))
if plot_explo:
eval_at = range(1, iterations, iterations/len(log.explo))
explo[log_file] = array(log.explo)
# if len(log.explo) == len(eval_at):
# explo[log_file] = array(log.explo)
# if len(log.explo) == 2*len(log.config.eval_at):
# explo[log_file] = array(log.explo)[range(0,len(log.explo),2)]
#
if plot_explo_comp:
if len(log.explo_comp) == len(eval_at):
explo_comp[log_file] = array(log.explo_comp)
print "Exploration:", log.explo_comp_explo
if plot_comp:
if len(log.eval_errors) == len(eval_at):
comp[log_file] = array(log.eval_errors)
# drawer = Drawer(log)
# #drawer.plot_learning_curve()
# drawer.plot_scatter1D(plot_testcases=plot_comp, show=False)
# #drawer.plot_scatter2D(plot_testcases=False, show=False)
# #drawer.plot_scatter3D(plot_testcases=True, show=False)
# #drawer.plot_exploration_measure()
except ValueError:
print "ValueError"
if plot_explo:
if len(explo.values()) > 0:
l = len(explo.values()[0])
print log.config.name
#print explo.values()
print l
x = eval_at[:l]
y = mean(array(explo.values()), axis=0)
error = std(array(explo.values()), axis=0)
error = error / sqrt(len(explo)) # Standard error of the mean
color_cycle = ax_explo._get_lines.color_cycle
next_color = next(color_cycle)
ax_explo.plot(x, y, label = log.config.name, color=next_color)
ax_explo.fill_between(x, y-error, y+error, alpha=0.2, label = log.config.name, color = next_color)
#ax.errorbar(eval_at[:l],, yerr=, label = log.config.name)
if plot_explo_comp:
print "ExploComp", explo_comp.values()
if len(explo_comp.values()) > 0:
l = len(explo_comp.values()[0])
print log.config.name
#print explo_comp.values()
print l
x = eval_at[:l]
y = mean(array(explo_comp.values()), axis=0)
error = std(array(explo_comp.values()), axis=0)
error = error / sqrt(len(explo_comp)) # Standard error of the mean
color_cycle = ax_explo_comp._get_lines.color_cycle
next_color = next(color_cycle)
ax_explo_comp.plot(x, y, label = log.config.name, color=next_color)
ax_explo_comp.fill_between(x, y-error, y+error, alpha=0.2, label = log.config.name, color = next_color)
#ax_comp.errorbar(eval_at[:l],, yerr=, label = log.config.name)
if plot_comp:
#print "Comp", comp.values()
if len(comp.values()) > 0:
l = len(comp.values()[0])
print log.config.name
print mean(mean(array(comp.values()), axis=2), axis=0)
print l
x = eval_at[:l]
y = mean(mean(array(comp.values()), axis=2), axis=0)
error = std(mean(array(comp.values()), axis=2), axis=0)
error = error / sqrt(len(comp)) # Standard error of the mean
#print x, y, error
color_cycle = ax_comp._get_lines.color_cycle
next_color = next(color_cycle)
ax_comp.plot(x, y, label = log.config.name, color=next_color)
ax_comp.fill_between(x, y-error, y+error, alpha=0.2, label = log.config.name, color = next_color)
#ax_comp.errorbar(eval_at[:l],, yerr=, label = log.config.name)
if plot_explo:
fig_explo.show()
ax_explo.legend(loc='upper left')
#fig_explo.set_size_inches(19.2,12)
plt.xlabel('Iterations', fontsize=18)
plt.ylabel('Explored cells', fontsize=18)
plt.xlim([0,2700])
plt.savefig(logs + log_dir + '/explo-' + log_dir[0:] + '.png')
plt.show(block=False)
if plot_explo_comp:
fig_explo_comp.show()
ax_explo_comp.legend(loc='lower right')
fig_explo_comp.set_size_inches(19.2,12)
plt.savefig(logs + log_dir + '/explo_comp-' + log_dir[0:] + '.png')
plt.show(block=False)
if plot_comp:
fig_comp.show()
ax_comp.legend(loc='upper right')
fig_comp.set_size_inches(19.2,12)
plt.savefig(logs + log_dir + '/comp-' + log_dir[0:] + '.png')
plt.show(block=False)
plt.show()
|
gpl-3.0
|
pradyu1993/scikit-learn
|
examples/neighbors/plot_nearest_centroid.py
|
5
|
1781
|
"""
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print __doc__
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
    # we create an instance of the NearestCentroid classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print shrinkage, np.mean(y == y_pred)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
pl.axis('tight')
pl.show()
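# Illustrative note, not part of the original example: after fitting, the
# per-class centroids are available as clf.centroids_ (shape (3, 2) here, one
# row per iris class); with shrink_threshold set, each centroid is shrunk
# towards the overall data mean, which can zero out noisy features.
# print clf.centroids_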
|
bsd-3-clause
|
mikebenfield/scikit-learn
|
sklearn/tests/test_metaestimators.py
|
52
|
4990
|
"""Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba',
'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
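# Illustrative sketch, not part of the original test module: the hides()
# decorator above works because a property that raises AttributeError makes
# hasattr() return False. The toy class below is made up for this example.
def _example_hidden_attribute():
    class WithHiddenMethod(object):
        @property
        def predict(self):
            raise AttributeError("predict is hidden")
    obj = WithHiddenMethod()
    return hasattr(obj, 'predict')  # False, although 'predict' is defined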
|
bsd-3-clause
|
se4u/pylearn2
|
pylearn2/scripts/datasets/browse_small_norb.py
|
44
|
6901
|
#!/usr/bin/env python
import sys
import argparse
import pickle
import warnings
import exceptions
import numpy
try:
from matplotlib import pyplot
except ImportError as import_error:
warnings.warn("Can't use this script without matplotlib.")
pyplot = None
from pylearn2.datasets import norb
warnings.warn("This script is deprecated. Please use ./browse_norb.py "
"instead. It is kept around as a tester for deprecated class "
"datasets.norb.SmallNORB",
exceptions.DeprecationWarning)
def main():
def parse_args():
parser = argparse.ArgumentParser(
description="Browser for SmallNORB dataset.")
parser.add_argument('--which_set',
default='train',
help="'train', 'test', or the path to a .pkl file")
parser.add_argument('--zca',
default=None,
help=("if --which_set points to a .pkl "
"file storing a ZCA-preprocessed "
"NORB dataset, you can optionally "
"enter the preprocessor's .pkl "
"file path here to undo the "
"ZCA'ing for visualization "
"purposes."))
return parser.parse_args()
def get_data(args):
if args.which_set in ('train', 'test'):
dataset = norb.SmallNORB(args.which_set, True)
else:
with open(args.which_set) as norb_file:
dataset = pickle.load(norb_file)
if len(dataset.y.shape) < 2 or dataset.y.shape[1] == 1:
print("This viewer does not support NORB datasets that "
"only have classification labels.")
sys.exit(1)
if args.zca is not None:
with open(args.zca) as zca_file:
zca = pickle.load(zca_file)
dataset.X = zca.inverse(dataset.X)
num_examples = dataset.X.shape[0]
topo_shape = ((num_examples, ) +
tuple(dataset.view_converter.shape))
assert topo_shape[-1] == 1
topo_shape = topo_shape[:-1]
values = dataset.X.reshape(topo_shape)
labels = numpy.array(dataset.y, 'int')
return values, labels, dataset.which_set
args = parse_args()
values, labels, which_set = get_data(args)
# For programming convenience, internally remap the instance labels to be
# 0-4, and the azimuth labels to be 0-17. The user will still only see the
# original, unmodified label values.
instance_index = norb.SmallNORB.label_type_to_index['instance']
def remap_instances(which_set, labels):
if which_set == 'train':
new_to_old_instance = [4, 6, 7, 8, 9]
elif which_set == 'test':
new_to_old_instance = [0, 1, 2, 3, 5]
num_instances = len(new_to_old_instance)
old_to_new_instance = numpy.ndarray(10, 'int')
old_to_new_instance.fill(-1)
old_to_new_instance[new_to_old_instance] = numpy.arange(num_instances)
instance_slice = numpy.index_exp[:, instance_index]
old_instances = labels[instance_slice]
new_instances = old_to_new_instance[old_instances]
labels[instance_slice] = new_instances
azimuth_index = norb.SmallNORB.label_type_to_index['azimuth']
azimuth_slice = numpy.index_exp[:, azimuth_index]
labels[azimuth_slice] = labels[azimuth_slice] / 2
return new_to_old_instance
new_to_old_instance = remap_instances(which_set, labels)
def get_new_azimuth_degrees(scalar_label):
return 20 * scalar_label
# Maps a label vector to the corresponding index in <values>
num_labels_by_type = numpy.array(norb.SmallNORB.num_labels_by_type, 'int')
num_labels_by_type[instance_index] = len(new_to_old_instance)
label_to_index = numpy.ndarray(num_labels_by_type, 'int')
label_to_index.fill(-1)
for i, label in enumerate(labels):
label_to_index[tuple(label)] = i
assert not numpy.any(label_to_index == -1) # all elements have been set
figure, axes = pyplot.subplots(1, 2, squeeze=True)
figure.canvas.set_window_title('Small NORB dataset (%sing set)' %
which_set)
# shift subplots down to make more room for the text
figure.subplots_adjust(bottom=0.05)
num_label_types = len(norb.SmallNORB.num_labels_by_type)
current_labels = numpy.zeros(num_label_types, 'int')
current_label_type = [0, ]
label_text = figure.suptitle("title text",
x=0.1,
horizontalalignment="left")
def redraw(redraw_text, redraw_images):
if redraw_text:
cl = current_labels
lines = [
'category: %s' % norb.SmallNORB.get_category(cl[0]),
'instance: %d' % new_to_old_instance[cl[1]],
'elevation: %d' % norb.SmallNORB.get_elevation_degrees(cl[2]),
'azimuth: %d' % get_new_azimuth_degrees(cl[3]),
'lighting: %d' % cl[4]]
lt = current_label_type[0]
lines[lt] = '==> ' + lines[lt]
text = ('Up/down arrows choose label, left/right arrows change it'
'\n\n' +
'\n'.join(lines))
label_text.set_text(text)
if redraw_images:
index = label_to_index[tuple(current_labels)]
image_pair = values[index, :, :, :]
for i in range(2):
axes[i].imshow(image_pair[i, :, :], cmap='gray')
figure.canvas.draw()
def on_key_press(event):
def add_mod(arg, step, size):
return (arg + size + step) % size
def incr_label_type(step):
current_label_type[0] = add_mod(current_label_type[0],
step,
num_label_types)
def incr_label(step):
lt = current_label_type[0]
num_labels = num_labels_by_type[lt]
current_labels[lt] = add_mod(current_labels[lt], step, num_labels)
if event.key == 'up':
incr_label_type(-1)
redraw(True, False)
elif event.key == 'down':
incr_label_type(1)
redraw(True, False)
elif event.key == 'left':
incr_label(-1)
redraw(True, True)
elif event.key == 'right':
incr_label(1)
redraw(True, True)
elif event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
redraw(True, True)
pyplot.show()
if __name__ == '__main__':
main()
|
bsd-3-clause
|
peterfpeterson/mantid
|
dev-docs/source/conf.py
|
3
|
6773
|
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
from sphinx import __version__ as sphinx_version
import sphinx_bootstrap_theme
from distutils.version import LooseVersion
# -- General configuration ------------------------------------------------
if LooseVersion(sphinx_version) > LooseVersion("1.6"):
def setup(app):
"""Called automatically by Sphinx when starting the build process
"""
app.add_stylesheet("custom.css")
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    # we use pngmath over mathjax so that the offline help isn't reliant on
# anything external and we don't need to include the large mathjax package
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx'
]
if LooseVersion(sphinx_version) > LooseVersion("1.8"):
extensions.append('sphinx.ext.imgmath')
else:
extensions.append('sphinx.ext.pngmath')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MantidProject'
copyright = u'2007-2020, Mantid'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = "master"
# The full version, including alpha/beta/rc tags.
release = version
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for pngmath --------------------------------------------------
# Load the preview package into latex
pngmath_latex_preamble=r'\usepackage[active]{preview}'
# Ensures that the vertical alignment of equations is correct.
# See http://sphinx-doc.org/ext/math.html#confval-pngmath_use_preview
pngmath_use_preview = True
# -- HTML output ----------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The "title" for HTML documentation generated with Sphinx' templates. This is appended to the <title> tag of individual pages
# and used in the navigation bar as the "topmost" element.
html_title = ""
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = os.path.relpath(os.path.join('..', '..', 'images', 'Mantid_Logo_Transparent.png'))
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If true, Smart Quotes will be used to convert quotes and dashes to
# typographically correct entities.
if sphinx_version < "1.7":
html_use_smartypants = True
else:
smartquotes = True
# Hide the Sphinx usage as we reference it on github instead.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# Theme-specific options to customize the look and feel of a theme.
# We config the bootstrap settings here, and apply CSS changes in
# custom.css rather than here.
html_theme_options = {
# Navigation bar title.
'navbar_title': " ", # deliberate single space so it's not visible
# Tab name for entire site.
'navbar_site_name': "Mantid",
# Add links to the nav bar. Third param of tuple is true to create absolute url.
'navbar_links': [
("Home", "index"),
("Download", "http://download.mantidproject.org", True),
("Wiki", "http://www.mantidproject.org", True),
("User Documentation", "http://docs.mantidproject.org", True),
("Contact Us", "http://www.mantidproject.org/Contact", True),
],
# Do not show the "Show source" button.
'source_link_position': "no",
# Remove the local TOC from the nav bar
'navbar_pagenav': False,
# Hide the next/previous in the nav bar.
'navbar_sidebarrel': True,
# Use the latest version.
'bootstrap_version': "3",
# Ensure the nav bar always stays on top of page.
'navbar_fixed_top': "false",
}
# -- Options for Epub output ---------------------------------------------------
# This flag determines if a toc entry is inserted again at the beginning of its nested toc listing.
# This allows easier navigation to the top of a chapter, but can be confusing because it mixes entries of different depth in one list.
# The default value is True.
epub_tocdup = True
#This setting controls the scope of the epub table of contents
epub_tocscope = 'includehidden'
#The author of the document. This is put in the Dublin Core metadata. The default value is 'unknown'.
epub_author = "The Mantid Project"
#The publisher of the document. This is put in the Dublin Core metadata. You may use any sensible string, e.g. the project homepage.
epub_publisher = "The Mantid Project"
#An identifier for the document. This is put in the Dublin Core metadata.
#For published documents this is the ISBN number, but you can also use an alternative scheme, e.g. the project homepage.
#The default value is 'unknown'.
epub_identifier = "www.mantidproject.org"
#The publication scheme for the epub_identifier. This is put in the Dublin Core metadata.
#For published books the scheme is 'ISBN'. If you use the project homepage, 'URL' seems reasonable.
#The default value is 'unknown'.
epub_scheme='URL'
#A unique identifier for the document. This is put in the Dublin Core metadata. You may use a random string.
#The default value is 'unknown'.
epub_uid = "Mantid Reference: " + version
# -- Link to other projects ----------------------------------------------------
intersphinx_mapping = {
'h5py': ('https://h5py.readthedocs.io/en/stable/', None),
'matplotlib': ('http://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'python': ('https://docs.python.org/3/', None),
'SciPy': ('http://docs.scipy.org/doc/scipy/reference', None),
'mantid': ('http://docs.mantidproject.org/', None)
}
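# Illustrative note, not part of the original configuration: with the
# intersphinx_mapping above, a .rst page in these docs can cross-reference the
# external manuals directly, e.g. :class:`numpy.ndarray` or
# :func:`matplotlib.pyplot.plot`, and Sphinx resolves the targets against the
# inventories listed for each project.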
|
gpl-3.0
|
afroisalreadyinu/stashpy
|
stashpy/tests/load/load_test.py
|
1
|
3429
|
"""In order to run the load tests, start stashpy and ElasticSearch
locally and run this file"""
import os
from time import sleep
import random
import socket
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from urllib.parse import urlencode
from datetime import datetime
from matplotlib import pyplot
import yaml
import json
import time
import stashpy
from stashpy.indexer import DEFAULT_INDEX_PATTERN
PROCESSES = ['nginx', 'uwsgi', 'python3', 'postgres', 'rabbitmq']
USERS = ['root', 'admini', 'restapi', 'postgres']
HOSTS = ['api01', 'api02', 'db', 'webserver', 'queue']
LOAD_DURATION = 5
#Why is ES query syntax so weird?
def get_config():
filepath = os.path.abspath(os.path.join(__file__, '../config.yml'))
with open(filepath, 'r') as conf_file:
config = yaml.load(conf_file)
return config
def index_url():
index = datetime.strftime(datetime.now(), DEFAULT_INDEX_PATTERN)
return 'http://localhost:9200/{}'.format(index)
def search_url():
index = datetime.strftime(datetime.now(), DEFAULT_INDEX_PATTERN)
return 'http://localhost:9200/{}/doc/_search'.format(index)
def rand_doc(base):
vals = dict(
process=random.choice(PROCESSES),
host=random.choice(HOSTS),
user=random.choice(USERS),
duration=random.randint(1000, 50000) / 1000.0,
output=random.randint(10, 50000)
)
doc = base.format(**vals)
return doc, vals
#TODO Better living through iterators
def run_step(sock, steps, doc_base):
docs_and_vals = [rand_doc(doc_base) for _ in range(5*steps)]
wait_step = 5.0/(5*steps + 1)
vals = []
for doc,val in docs_and_vals:
sock.sendall(doc.encode('utf-8') + b'\n')
time.sleep(wait_step)
vals.append(val)
return vals
def check_step(es_host, es_port, vals):
q = {"query": {
"filtered": {
"query": {"match_all": {}},
"filter": {}}}}
url = search_url()
succeeded = 0
for entry in vals:
q['query']['filtered']['filter']['and'] = [{'term': {key: value}} for key,value in entry.items()]
resp = urlopen(url, data=json.dumps(q).encode('utf-8'))
hits = json.loads(resp.read().decode('utf-8'))['hits']['total']
if hits == 1:
succeeded += 1
return succeeded
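# Illustrative note, not part of the original script: for a parsed log entry
# such as {'host': 'api01', 'user': 'root'}, check_step() fills the filter
# with one term clause per field, roughly
#     {"query": {"filtered": {"query": {"match_all": {}},
#                             "filter": {"and": [{"term": {"host": "api01"}},
#                                                {"term": {"user": "root"}}]}}}
# and counts a success when exactly one document matches.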
def main():
config = get_config()
try:
urlopen(Request(index_url(), method='DELETE'))
except HTTPError:
pass
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((config['address'], config['port']))
per_second = list(range(0, 210, 10))
per_second[0] = 1
success_vals = []
for step in per_second:
print('Doing {} loglines per sec'.format(step))
vals = run_step(sock, step, config['processor_spec']['to_dict'][0])
if step == 1:
time.sleep(2)
flush_url = '{}/_flush'.format(index_url())
resp = urlopen(Request(flush_url, urlencode({'wait_if_ongoing': 'true'}).encode('utf-8'))).read()
success_vals.append(check_step(config['indexer_config']['host'],
config['indexer_config']['port'],
vals))
pyplot.plot(per_second, success_vals)
pyplot.xlabel('Log lines per second')
pyplot.ylabel('Successful')
pyplot.savefig('lines_per_sec.png', bbox_inches='tight')
if __name__ == "__main__":
main()
|
gpl-3.0
|
QuLogic/cartopy
|
lib/cartopy/mpl/contour.py
|
2
|
3963
|
# Copyright Cartopy Contributors
#
# This file is part of Cartopy and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
from matplotlib.contour import QuadContourSet
import matplotlib.path as mpath
import numpy as np
class GeoContourSet(QuadContourSet):
"""
A contourset designed to handle things like contour labels.
"""
# nb. No __init__ method here - most of the time a GeoContourSet will
# come from GeoAxes.contour[f]. These methods morph a ContourSet by
# fiddling with instance.__class__.
def clabel(self, *args, **kwargs):
# nb: contour labelling does not work very well for filled
# contours - it is recommended to only label line contours.
# This is especially true when inline=True.
        # This wrapper exists because mpl does not properly transform
# paths. Instead it simply assumes one path represents one polygon
# (not necessarily the case), and it assumes that
# transform(path.verts) is equivalent to transform_path(path).
# Unfortunately there is no way to easily correct this error,
# so we are forced to pre-transform the ContourSet's paths from
# the source coordinate system to the axes' projection.
# The existing mpl code then has a much simpler job of handling
# pre-projected paths (which can now effectively be transformed
# naively).
for col in self.collections:
# Snaffle the collection's path list. We will change the
# list in-place (as the contour label code does in mpl).
paths = col.get_paths()
# The ax attribute is deprecated in MPL 3.3 in favor of
# axes. So, here we test if axes is present and fall back
# on the old self.ax to support MPL versions less than 3.3
if hasattr(self, "axes"):
data_t = self.axes.transData
else:
data_t = self.ax.transData
# Define the transform that will take us from collection
# coordinates through to axes projection coordinates.
col_to_data = col.get_transform() - data_t
# Now that we have the transform, project all of this
# collection's paths.
new_paths = [col_to_data.transform_path(path) for path in paths]
new_paths = [path for path in new_paths if path.vertices.size >= 1]
# The collection will now be referenced in axes projection
# coordinates.
col.set_transform(data_t)
# Clear the now incorrectly referenced paths.
del paths[:]
for path in new_paths:
if path.vertices.size == 0:
# Don't persist empty paths. Let's get rid of them.
continue
# Split the path if it has multiple MOVETO statements.
codes = np.array(
path.codes if path.codes is not None else [0])
moveto = codes == mpath.Path.MOVETO
if moveto.sum() <= 1:
# This is only one path, so add it to the collection.
paths.append(path)
else:
# The first MOVETO doesn't need cutting-out.
moveto[0] = False
split_locs = np.flatnonzero(moveto)
split_verts = np.split(path.vertices, split_locs)
split_codes = np.split(path.codes, split_locs)
for verts, codes in zip(split_verts, split_codes):
# Add this path to the collection's list of paths.
paths.append(mpath.Path(verts, codes))
# Now that we have prepared the collection paths, call on
# through to the underlying implementation.
return super().clabel(*args, **kwargs)
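# Editor's note -- a minimal usage sketch, not part of the upstream module.
# Contour labelling on a GeoAxes typically looks like the following; the
# projection, grid and data below are purely illustrative:
#
#     >>> import numpy as np
#     >>> import matplotlib.pyplot as plt
#     >>> import cartopy.crs as ccrs
#     >>> lon, lat = np.meshgrid(np.linspace(-180, 180, 73),
#     ...                        np.linspace(-90, 90, 37))
#     >>> data = np.sin(np.radians(lon)) * np.cos(np.radians(lat))
#     >>> ax = plt.axes(projection=ccrs.Robinson())
#     >>> cs = ax.contour(lon, lat, data, transform=ccrs.PlateCarree())
#     >>> ax.clabel(cs, inline=True, fontsize=8)  # label line contours only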
|
lgpl-3.0
|
Vimos/scikit-learn
|
sklearn/manifold/isomap.py
|
39
|
7519
|
"""Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto', n_jobs=1):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter,
n_jobs=self.n_jobs)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance', n_jobs=self.n_jobs)
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
        -----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
# Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min(self.dist_matrix_[indices[i]] +
distances[i][:, None], 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
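# Editor's note -- a minimal usage sketch, not part of the upstream module.
# The class is normally used through the public import path; the data below
# is illustrative only:
#
#     >>> import numpy as np
#     >>> from sklearn.manifold import Isomap
#     >>> X = np.random.RandomState(0).rand(100, 5)
#     >>> Isomap(n_neighbors=10, n_components=2).fit_transform(X).shape
#     (100, 2)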
|
bsd-3-clause
|
ishanic/scikit-learn
|
examples/mixture/plot_gmm_selection.py
|
248
|
3223
|
"""
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
|
bsd-3-clause
|
hsiaoyi0504/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
12
|
11592
|
import numpy as np
from scipy.sparse import csr_matrix
from scipy.linalg import block_diag
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
    # Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
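# Editor's note: with n_topics = 3 the matrix built above is a 9 x 9
# block-diagonal CSR matrix of 3 x 3 blocks filled with the value 3:
#
#     [[3 3 3 0 0 0 0 0 0]
#      [3 3 3 0 0 0 0 0 0]
#      [3 3 3 0 0 0 0 0 0]
#      [0 0 0 3 3 3 0 0 0]
#       ...                ]
#
# so each "topic" owns three words that never co-occur with another topic's words.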
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
    # Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method, random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method', LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
    # test that fitting a dense matrix with negative values raises an error
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2)
@if_not_mac_os()
def test_lda_multi_jobs():
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=3,
learning_method=method, random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_not_mac_os()
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=-1, learning_offset=5.,
total_samples=30, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X, invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X, invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
|
bsd-3-clause
|
sahilshekhawat/sympy
|
sympy/utilities/runtests.py
|
3
|
79087
|
"""
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly
(or identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
from __future__ import print_function, division
import os
import sys
import platform
import inspect
import traceback
import pdb
import re
import linecache
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import random
import subprocess
import signal
import stat
from inspect import isgeneratorfunction
from sympy.core.cache import clear_cache
from sympy.core.compatibility import exec_, PY3, string_types, range
from sympy.utilities.misc import find_executable
from sympy.external import import_module
from sympy.utilities.exceptions import SymPyDeprecationWarning
IS_WINDOWS = (os.name == 'nt')
class Skipped(Exception):
pass
import __future__
# add more flags ??
future_flags = __future__.division.compiler_flag
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in ``s``, and return the result.
If the string ``s`` is Unicode, it is encoded using the stdout
encoding and the ``backslashreplace`` error handler.
"""
# After a 2to3 run the below code is bogus, so wrap it with a version check
if not PY3:
if isinstance(s, unicode):
s = s.encode(pdoctest._encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
pdoctest._indent = _indent
# override the reporter to handle Windows and Python 3
def _report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
s = self._checker.output_difference(example, got, self.optionflags)
s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')
out(self._failure_header(test, example) + s)
if PY3 and IS_WINDOWS:
DocTestRunner.report_failure = _report_failure
def convert_to_native_paths(lst):
"""
Converts a list of '/' separated paths into a list of
native (os.sep separated) paths and converts to lowercase
if the system is case insensitive.
"""
newlst = []
for i, rv in enumerate(lst):
rv = os.path.join(*rv.split("/"))
# on windows the slash after the colon is dropped
if sys.platform == "win32":
pos = rv.find(':')
if pos != -1:
if rv[pos + 1] != '\\':
rv = rv[:pos + 1] + '\\' + rv[pos + 1:]
newlst.append(sys_normcase(rv))
return newlst
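# Editor's note: for example, convert_to_native_paths(["sympy/core"]) returns
# something like ["sympy\\core"] on Windows (lower-cased when the filesystem
# is case insensitive); on POSIX systems the paths are returned unchanged
# apart from the normcase step.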
def get_sympy_dir():
"""
Returns the root sympy directory and set the global value
indicating whether the system is case sensitive or not.
"""
global sys_case_insensitive
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
sys_case_insensitive = (os.path.isdir(sympy_dir) and
os.path.isdir(sympy_dir.lower()) and
os.path.isdir(sympy_dir.upper()))
return sys_normcase(sympy_dir)
def sys_normcase(f):
if sys_case_insensitive: # global defined after call to get_sympy_dir()
return f.lower()
return f
def setup_pprint():
from sympy import pprint_use_unicode, init_printing
# force pprint to be in ascii mode in doctests
pprint_use_unicode(False)
# hook our nice, hash-stable strprinter
init_printing(pretty_print=False)
def run_in_subprocess_with_hash_randomization(function, function_args=(),
function_kwargs={}, command=sys.executable,
module='sympy.utilities.runtests', force=False):
"""
Run a function in a Python subprocess with hash randomization enabled.
If hash randomization is not supported by the version of Python given, it
returns False. Otherwise, it returns the exit value of the command. The
function is passed to sys.exit(), so the return value of the function will
be the return value.
The environment variable PYTHONHASHSEED is used to seed Python's hash
randomization. If it is set, this function will return False, because
starting a new subprocess is unnecessary in that case. If it is not set,
one is set at random, and the tests are run. Note that if this
environment variable is set when Python starts, hash randomization is
automatically enabled. To force a subprocess to be created even if
PYTHONHASHSEED is set, pass ``force=True``. This flag will not force a
subprocess in Python versions that do not support hash randomization (see
below), because those versions of Python do not support the ``-R`` flag.
``function`` should be a string name of a function that is importable from
the module ``module``, like "_test". The default for ``module`` is
"sympy.utilities.runtests". ``function_args`` and ``function_kwargs``
should be a repr-able tuple and dict, respectively. The default Python
command is sys.executable, which is the currently running Python command.
This function is necessary because the seed for hash randomization must be
set by the environment variable before Python starts. Hence, in order to
use a predetermined seed for tests, we must start Python in a separate
subprocess.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
Examples
========
>>> from sympy.utilities.runtests import (
... run_in_subprocess_with_hash_randomization)
>>> # run the core tests in verbose mode
>>> run_in_subprocess_with_hash_randomization("_test",
... function_args=("core",),
... function_kwargs={'verbose': True}) # doctest: +SKIP
# Will return 0 if sys.executable supports hash randomization and tests
# pass, 1 if they fail, and False if it does not support hash
# randomization.
"""
# Note, we must return False everywhere, not None, as subprocess.call will
# sometimes return None.
# First check if the Python version supports hash randomization
    # If it doesn't have this support, it won't recognize the -R flag
p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode != 0:
return False
hash_seed = os.getenv("PYTHONHASHSEED")
if not hash_seed:
os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32))
else:
if not force:
return False
# Now run the command
commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" %
(module, function, function, repr(function_args),
repr(function_kwargs)))
try:
p = subprocess.Popen([command, "-R", "-c", commandstring])
p.communicate()
except KeyboardInterrupt:
p.wait()
finally:
# Put the environment variable back, so that it reads correctly for
# the current Python process.
if hash_seed is None:
del os.environ["PYTHONHASHSEED"]
else:
os.environ["PYTHONHASHSEED"] = hash_seed
return p.returncode
def run_all_tests(test_args=(), test_kwargs={}, doctest_args=(),
doctest_kwargs={}, examples_args=(), examples_kwargs={'quiet': True}):
"""
Run all tests.
Right now, this runs the regular tests (bin/test), the doctests
(bin/doctest), the examples (examples/all.py), and the sage tests (see
sympy/external/tests/test_sage.py).
This is what ``setup.py test`` uses.
You can pass arguments and keyword arguments to the test functions that
support them (for now, test, doctest, and the examples). See the
docstrings of those functions for a description of the available options.
For example, to run the solvers tests with colors turned off:
>>> from sympy.utilities.runtests import run_all_tests
>>> run_all_tests(test_args=("solvers",),
    ... test_kwargs={"colors": False}) # doctest: +SKIP
"""
tests_successful = True
try:
# Regular tests
if not test(*test_args, **test_kwargs):
# some regular test fails, so set the tests_successful
# flag to false and continue running the doctests
tests_successful = False
# Doctests
print()
if not doctest(*doctest_args, **doctest_kwargs):
tests_successful = False
# Examples
print()
sys.path.append("examples")
from all import run_examples # examples/all.py
if not run_examples(*examples_args, **examples_kwargs):
tests_successful = False
# Sage tests
if not (sys.platform == "win32" or PY3):
# run Sage tests; Sage currently doesn't support Windows or Python 3
dev_null = open(os.devnull, 'w')
if subprocess.call("sage -v", shell=True, stdout=dev_null,
stderr=dev_null) == 0:
if subprocess.call("sage -python bin/test "
"sympy/external/tests/test_sage.py", shell=True) != 0:
tests_successful = False
if tests_successful:
return
else:
# Return nonzero exit code
sys.exit(1)
except KeyboardInterrupt:
print()
print("DO *NOT* COMMIT!")
sys.exit(1)
def test(*paths, **kwargs):
"""
Run tests in the specified test_*.py files.
Tests in a particular test_*.py file are run if any of the given strings
in ``paths`` matches a part of the test file's path. If ``paths=[]``,
tests in all test_*.py files are run.
Notes:
- If sort=False, tests are run in random order (not default).
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
**Explanation of test results**
====== ===============================================================
Output Meaning
====== ===============================================================
. passed
F failed
X XPassed (expected to fail but passed)
f XFAILed (expected to fail and indeed failed)
s skipped
w slow
T timeout (e.g., when ``--timeout`` is used)
K KeyboardInterrupt (when running the slow tests with ``--slow``,
you can interrupt one of them without killing the test runner)
====== ===============================================================
Colors have no additional meaning and are used just to facilitate
interpreting the output.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.test() # doctest: +SKIP
Run one file:
>>> sympy.test("sympy/core/tests/test_basic.py") # doctest: +SKIP
>>> sympy.test("_basic") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... "sympy/functions") # doctest: +SKIP
Run all tests in sympy/core and sympy/utilities:
>>> sympy.test("/core", "/util") # doctest: +SKIP
Run specific test from a file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... kw="test_equality") # doctest: +SKIP
Run specific test from any file:
>>> sympy.test(kw="subs") # doctest: +SKIP
Run the tests with verbose mode on:
>>> sympy.test(verbose=True) # doctest: +SKIP
Don't sort the test output:
>>> sympy.test(sort=False) # doctest: +SKIP
Turn on post-mortem pdb:
>>> sympy.test(pdb=True) # doctest: +SKIP
Turn off colors:
>>> sympy.test(colors=False) # doctest: +SKIP
Force colors, even when the output is not to a terminal (this is useful,
e.g., if you are piping to ``less -r`` and you still want colors)
    >>> sympy.test(force_colors=True) # doctest: +SKIP
The traceback verboseness can be set to "short" or "no" (default is
"short")
>>> sympy.test(tb='no') # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. For instance, to run the first half of the test suite:
>>> sympy.test(split='1/2') # doctest: +SKIP
You can disable running the tests in a separate subprocess using
``subprocess=False``. This is done to support seeding hash randomization,
which is enabled by default in the Python versions where it is supported.
If subprocess=False, hash randomization is enabled/disabled according to
whether it has been enabled or not in the calling Python process.
However, even if it is enabled, the seed cannot be printed unless it is
called from a new Python process.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
If hash randomization is not supported ``subprocess=False`` is used
automatically.
>>> sympy.test(subprocess=False) # doctest: +SKIP
To set the hash randomization seed, set the environment variable
``PYTHONHASHSEED`` before running the tests. This can be done from within
Python using
>>> import os
>>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP
Or from the command line using
$ PYTHONHASHSEED=42 ./bin/test
If the seed is not set, a random seed will be chosen.
Note that to reproduce the same hash values, you must use both the same seed
as well as the same architecture (32-bit vs. 64-bit).
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_test",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_test(*paths, **kwargs))
if not val or i == 0:
return val
def _test(*paths, **kwargs):
"""
Internal function that actually runs the tests.
All keyword arguments from ``test()`` are passed to this function except for
``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstring of
``test()`` for more information.
"""
verbose = kwargs.get("verbose", False)
tb = kwargs.get("tb", "short")
kw = kwargs.get("kw", None) or ()
# ensure that kw is a tuple
if isinstance(kw, str):
kw = (kw, )
post_mortem = kwargs.get("pdb", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
sort = kwargs.get("sort", True)
seed = kwargs.get("seed", None)
if seed is None:
seed = random.randrange(100000000)
timeout = kwargs.get("timeout", False)
slow = kwargs.get("slow", False)
enhance_asserts = kwargs.get("enhance_asserts", False)
split = kwargs.get('split', None)
blacklist = kwargs.get('blacklist', [])
blacklist = convert_to_native_paths(blacklist)
r = PyTestReporter(verbose=verbose, tb=tb, colors=colors,
force_colors=force_colors, split=split)
t = SymPyTests(r, kw, post_mortem, seed)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
test_files = t.get_test_files('sympy')
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
return int(not t.test(sort=sort, timeout=timeout,
slow=slow, enhance_asserts=enhance_asserts))
def doctest(*paths, **kwargs):
"""
Runs doctests in all \*.py files in the sympy directory which match
any of the given strings in ``paths`` or all tests if paths=[].
Notes:
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.doctest() # doctest: +SKIP
Run one file:
>>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP
>>> sympy.doctest("polynomial.rst") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP
Run any file having polynomial in its name, doc/src/modules/polynomial.rst,
sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py:
>>> sympy.doctest("polynomial") # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. Note that the regular doctests and the Sphinx
doctests are split independently. For instance, to run the first half of
the test suite:
>>> sympy.doctest(split='1/2') # doctest: +SKIP
The ``subprocess`` and ``verbose`` options are the same as with the function
``test()``. See the docstring of that function for more information.
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_doctest",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_doctest(*paths, **kwargs))
if not val or i == 0:
return val
def _doctest(*paths, **kwargs):
"""
Internal function that actually runs the doctests.
All keyword arguments from ``doctest()`` are passed to this function
except for ``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstrings of
``doctest()`` and ``test()`` for more information.
"""
normal = kwargs.get("normal", False)
verbose = kwargs.get("verbose", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
blacklist = kwargs.get("blacklist", [])
split = kwargs.get('split', None)
blacklist.extend([
"doc/src/modules/plotting.rst", # generates live plots
"sympy/utilities/compilef.py", # needs tcc
"sympy/physics/gaussopt.py", # raises deprecation warning
])
if import_module('numpy') is None:
blacklist.extend([
"sympy/plotting/experimental_lambdify.py",
"sympy/plotting/plot_implicit.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py",
"examples/intermediate/sample.py",
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py",
"doc/src/modules/numeric-computation.rst"
])
else:
if import_module('matplotlib') is None:
blacklist.extend([
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py"
])
else:
# don't display matplotlib windows
from sympy.plotting.plot import unset_show
unset_show()
if import_module('pyglet') is None:
blacklist.extend(["sympy/plotting/pygletplot"])
if import_module('theano') is None:
blacklist.extend(["doc/src/modules/numeric-computation.rst"])
# disabled because of doctest failures in asmeurer's bot
blacklist.extend([
"sympy/utilities/autowrap.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py"
])
# blacklist these modules until issue 4840 is resolved
blacklist.extend([
"sympy/conftest.py",
"sympy/utilities/benchmarking.py"
])
blacklist = convert_to_native_paths(blacklist)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
r = PyTestReporter(verbose, split=split, colors=colors,\
force_colors=force_colors)
t = SymPyDocTests(r, normal)
test_files = t.get_test_files('sympy')
test_files.extend(t.get_test_files('examples', init_only=False))
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# take only what was requested...but not blacklisted items
# and allow for partial match anywhere or fnmatch of name
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
# run the tests and record the result for this *py portion of the tests
if t._testfiles:
failed = not t.test()
else:
failed = False
# N.B.
# --------------------------------------------------------------------
# Here we test *.rst files at or below doc/src. Code from these must
# be self supporting in terms of imports since there is no importing
# of necessary modules by doctest.testfile. If you try to pass *.py
# files through this they might fail because they will lack the needed
# imports and smarter parsing that can be done with source code.
#
test_files = t.get_test_files('doc/src', '*.rst', init_only=False)
test_files.sort()
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# Take only what was requested as long as it's not on the blacklist.
# Paths were already made native in *py tests so don't repeat here.
# There's no chance of having a *py file slip through since we
# only have *rst files in test_files.
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
setup_pprint()
first_report = True
for rst_file in matched:
if not os.path.isfile(rst_file):
continue
old_displayhook = sys.displayhook
try:
out = sympytestfile(
rst_file, module_relative=False, encoding='utf-8',
optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
finally:
# make sure we return to the original displayhook in case some
# doctest has changed that
sys.displayhook = old_displayhook
rstfailed, tested = out
if tested:
failed = rstfailed or failed
if first_report:
first_report = False
msg = 'rst doctests start'
if not t._testfiles:
r.start(msg=msg)
else:
r.write_center(msg)
print()
# use as the id, everything past the first 'sympy'
file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:]
print(file_id, end=" ")
            # get at least the name out so it is known who is being tested
wid = r.terminal_width - len(file_id) - 1 # update width
test_file = '[%s]' % (tested)
report = '[%s]' % (rstfailed or 'OK')
print(''.join(
[test_file, ' '*(wid - len(test_file) - len(report)), report])
)
# the doctests for *py will have printed this message already if there was
# a failure, so now only print it if there was intervening reporting by
# testing the *rst as evidenced by first_report no longer being True.
if not first_report and failed:
print()
print("DO *NOT* COMMIT!")
return int(failed)
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
def split_list(l, split):
"""
Splits a list into part a of b
split should be a string of the form 'a/b'. For instance, '1/3' would give
the split one of three.
If the length of the list is not divisible by the number of splits, the
last split will have more items.
>>> from sympy.utilities.runtests import split_list
>>> a = list(range(10))
>>> split_list(a, '1/3')
[0, 1, 2]
>>> split_list(a, '2/3')
[3, 4, 5]
>>> split_list(a, '3/3')
[6, 7, 8, 9]
"""
m = sp.match(split)
if not m:
raise ValueError("split must be a string of the form a/b where a and b are ints")
i, t = map(int, m.groups())
return l[(i - 1)*len(l)//t:i*len(l)//t]
from collections import namedtuple
SymPyTestResults = namedtuple('TestResults', 'failed attempted')
def sympytestfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False,
parser=pdoctest.DocTestParser(), encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg ``module_relative`` specifies how filenames
should be interpreted:
- If ``module_relative`` is True (the default), then ``filename``
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
``package`` argument is specified, then it is relative to that
package. To ensure os-independence, ``filename`` should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If ``module_relative`` is False, then ``filename`` specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg ``name`` gives the name of the test; by default
use the file's basename.
Optional keyword argument ``package`` is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify ``package`` if ``module_relative`` is False.
Optional keyword arg ``globs`` gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg ``extraglobs`` gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg ``verbose`` prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg ``report`` prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg ``optionflags`` or's together module constants,
and defaults to 0. Possible values (see the docs for details):
- DONT_ACCEPT_TRUE_FOR_1
- DONT_ACCEPT_BLANKLINE
- NORMALIZE_WHITESPACE
- ELLIPSIS
- SKIP
- IGNORE_EXCEPTION_DETAIL
- REPORT_UDIFF
- REPORT_CDIFF
- REPORT_NDIFF
- REPORT_ONLY_FIRST_FAILURE
Optional keyword arg ``raise_on_error`` raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg ``parser`` specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg ``encoding`` specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if not PY3:
text, filename = pdoctest._load_testfile(
filename, package, module_relative)
if encoding is not None:
text = text.decode(encoding)
else:
text, filename = pdoctest._load_testfile(
filename, package, module_relative, encoding)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)
runner._checker = SymPyOutputChecker()
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test, compileflags=future_flags)
if report:
runner.summarize()
if pdoctest.master is None:
pdoctest.master = runner
else:
pdoctest.master.merge(runner)
return SymPyTestResults(runner.failures, runner.tries)
class SymPyTests(object):
def __init__(self, reporter, kw="", post_mortem=False,
seed=None):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._testfiles = []
self._seed = seed if seed is not None else random.random()
def test(self, sort=False, timeout=False, slow=False, enhance_asserts=False):
"""
Runs the tests returning True if all tests pass, otherwise False.
If sort=False run tests in random order.
"""
if sort:
self._testfiles.sort()
else:
from random import shuffle
random.seed(self._seed)
shuffle(self._testfiles)
self._reporter.start(self._seed)
for f in self._testfiles:
try:
self.test_file(f, sort, timeout, slow, enhance_asserts)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def _enhance_asserts(self, source):
from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple,
Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations)
ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=',
"Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not',
"In": 'in', "NotIn": 'not in'}
class Transform(NodeTransformer):
def visit_Assert(self, stmt):
if isinstance(stmt.test, Compare):
compare = stmt.test
values = [compare.left] + compare.comparators
names = [ "_%s" % i for i, _ in enumerate(values) ]
names_store = [ Name(n, Store()) for n in names ]
names_load = [ Name(n, Load()) for n in names ]
target = Tuple(names_store, Store())
value = Tuple(values, Load())
assign = Assign([target], value)
new_compare = Compare(names_load[0], compare.ops, names_load[1:])
msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s"
msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load()))
test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset)
return [assign, test]
else:
return stmt
tree = parse(source)
new_tree = Transform().visit(tree)
return fix_missing_locations(new_tree)
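    # Editor's illustration of the rewrite performed above (approximate): an
    # assert such as
    #
    #     assert f(x) == g(x)
    #
    # becomes, roughly,
    #
    #     (_0, _1) = (f(x), g(x))
    #     assert _0 == _1, "\n%s ==\n%s" % (_0, _1)
    #
    # so that a failing assert reports the values that were compared.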
def test_file(self, filename, sort=True, timeout=False, slow=False, enhance_asserts=False):
funcs = []
try:
gl = {'__file__': filename}
try:
if PY3:
open_file = lambda: open(filename, encoding="utf8")
else:
open_file = lambda: open(filename)
with open_file() as f:
source = f.read()
if self._kw:
for l in source.splitlines():
if l.lstrip().startswith('def '):
if any(l.find(k) != -1 for k in self._kw):
break
else:
return
if enhance_asserts:
try:
source = self._enhance_asserts(source)
except ImportError:
pass
code = compile(source, filename, "exec")
exec_(code, gl)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
clear_cache()
self._count += 1
random.seed(self._seed)
pytestfile = ""
if "XFAIL" in gl:
pytestfile = inspect.getsourcefile(gl["XFAIL"])
pytestfile2 = ""
if "slow" in gl:
pytestfile2 = inspect.getsourcefile(gl["slow"])
disabled = gl.get("disabled", False)
if not disabled:
                # we need to filter only those functions that begin with
                # 'test_' and are defined either in the test file itself or in
                # the file where the XFAIL decorator is defined
funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
(inspect.isfunction(gl[f]) or inspect.ismethod(gl[f])) and
(inspect.getsourcefile(gl[f]) == filename or
inspect.getsourcefile(gl[f]) == pytestfile or
inspect.getsourcefile(gl[f]) == pytestfile2)]
if slow:
funcs = [f for f in funcs if getattr(f, '_slow', False)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i < len(funcs):
if isgeneratorfunction(funcs[i]):
# some tests can be generators, that return the actual
# test functions. We unpack it below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
                            # bind func/args now; with late binding every wrapper would call the last item
                            fgw = lambda func=func, args=args: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
if not funcs:
return
except Exception:
self._reporter.entering_filename(filename, len(funcs))
raise
self._reporter.entering_filename(filename, len(funcs))
if not sort:
random.shuffle(funcs)
for f in funcs:
self._reporter.entering_test(f)
try:
if getattr(f, '_slow', False) and not slow:
raise Skipped("Slow")
if timeout:
self._timeout(f, timeout)
else:
random.seed(self._seed)
f()
except KeyboardInterrupt:
if getattr(f, '_slow', False):
self._reporter.test_skip("KeyboardInterrupt")
else:
raise
except Exception:
if timeout:
signal.alarm(0) # Disable the alarm. It could not be handled before.
t, v, tr = sys.exc_info()
if t is AssertionError:
self._reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
self._reporter.test_skip(v)
elif t.__name__ == "XFail":
self._reporter.test_xfail()
elif t.__name__ == "XPass":
self._reporter.test_xpass(v)
else:
self._reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def _timeout(self, function, timeout):
def callback(x, y):
signal.alarm(0)
raise Skipped("Timeout")
signal.signal(signal.SIGALRM, callback)
signal.alarm(timeout) # Set an alarm with a given timeout
function()
signal.alarm(0) # Disable the alarm
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if not self._kw:
return True
for kw in self._kw:
if x.__name__.find(kw) != -1:
return True
return False
def get_test_files(self, dir, pat='test_*.py'):
"""
Returns the list of test_*.py (default) files at or below directory
``dir`` relative to the sympy home directory.
"""
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])
return sorted([sys_normcase(gi) for gi in g])
class SymPyDocTests(object):
def __init__(self, reporter, normal):
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._normal = normal
self._testfiles = []
def test(self):
"""
Runs the tests and returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._testfiles:
try:
self.test_file(f)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def test_file(self, filename):
clear_cache()
from sympy.core.compatibility import StringIO
rel_name = filename[len(self._root_dir) + 1:]
dirname, file = os.path.split(filename)
module = rel_name.replace(os.sep, '.')[:-3]
if rel_name.startswith("examples"):
            # Example files do not have __init__.py files,
            # so we have to temporarily extend sys.path to import them
sys.path.insert(0, dirname)
module = file[:-3] # remove ".py"
setup_pprint()
try:
module = pdoctest._normalize_module(module)
tests = SymPyDocTestFinder().find(module)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
finally:
if rel_name.startswith("examples"):
del sys.path[0]
tests = [test for test in tests if len(test.examples) > 0]
# By default tests are sorted by alphabetical order by function name.
# We sort by line number so one can edit the file sequentially from
# bottom to top. However, if there are decorated functions, their line
# numbers will be too large and for now one must just search for these
# by text and function name.
tests.sort(key=lambda x: -x.lineno)
if not tests:
return
self._reporter.entering_filename(filename, len(tests))
for test in tests:
assert len(test.examples) != 0
# check if there are external dependencies which need to be met
if '_doctest_depends_on' in test.globs:
if not self._process_dependencies(test.globs['_doctest_depends_on']):
self._reporter.test_skip()
continue
runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
runner._checker = SymPyOutputChecker()
old = sys.stdout
new = StringIO()
sys.stdout = new
# If the testing is normal, the doctests get importing magic to
            # provide the global namespace. If not normal (the default) then
            # they must run on their own; all imports must be explicit within
# a function's docstring. Once imported that import will be
# available to the rest of the tests in a given function's
# docstring (unless clear_globs=True below).
if not self._normal:
test.globs = {}
# if this is uncommented then all the test would get is what
# comes by default with a "from sympy import *"
#exec('from sympy import *') in test.globs
test.globs['print_function'] = print_function
try:
f, t = runner.run(test, compileflags=future_flags,
out=new.write, clear_globs=False)
except KeyboardInterrupt:
raise
finally:
sys.stdout = old
if f > 0:
self._reporter.doctest_fail(test.name, new.getvalue())
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_test_files(self, dir, pat='*.py', init_only=True):
"""
Returns the list of \*.py files (default) from which docstrings
        will be tested, at or below directory ``dir``. By default,
only those that have an __init__.py in their parent directory
and do not start with ``test_`` will be included.
"""
def importable(x):
"""
Checks if given pathname x is an importable module by checking for
__init__.py file.
Returns True/False.
Currently we only test if the __init__.py file exists in the
directory with the file "x" (in theory we should also test all the
parent dirs).
"""
init_py = os.path.join(os.path.dirname(x), "__init__.py")
return os.path.exists(init_py)
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files
if not f.startswith('test_') and fnmatch(f, pat)])
if init_only:
# skip files that are not importable (i.e. missing __init__.py)
g = [x for x in g if importable(x)]
return [sys_normcase(gi) for gi in g]
def _process_dependencies(self, deps):
"""
Returns ``False`` if some dependencies are not met and the test should be
        skipped; otherwise returns ``True``.
"""
executables = deps.get('exe', None)
moduledeps = deps.get('modules', None)
viewers = deps.get('disable_viewers', None)
pyglet = deps.get('pyglet', None)
# print deps
if executables is not None:
for ex in executables:
found = find_executable(ex)
if found is None:
return False
if moduledeps is not None:
for extmod in moduledeps:
if extmod == 'matplotlib':
matplotlib = import_module(
'matplotlib',
__import__kwargs={'fromlist':
['pyplot', 'cm', 'collections']},
min_module_version='1.0.0', catch=(RuntimeError,))
if matplotlib is not None:
pass
else:
return False
else:
# TODO min version support
mod = import_module(extmod)
if mod is not None:
version = "unknown"
if hasattr(mod, '__version__'):
version = mod.__version__
else:
return False
if viewers is not None:
import tempfile
tempdir = tempfile.mkdtemp()
os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH'])
if PY3:
vw = '#!/usr/bin/env python3\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
else:
vw = '#!/usr/bin/env python\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
for viewer in viewers:
with open(os.path.join(tempdir, viewer), 'w') as fh:
fh.write(vw)
# make the file executable
os.chmod(os.path.join(tempdir, viewer),
stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR)
if pyglet:
# monkey-patch pyglet s.t. it does not open a window during
# doctesting
import pyglet
class DummyWindow(object):
def __init__(self, *args, **kwargs):
self.has_exit=True
self.width = 600
self.height = 400
def set_vsync(self, x):
pass
def switch_to(self):
pass
def push_handlers(self, x):
pass
def close(self):
pass
pyglet.window.Window = DummyWindow
return True
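# A minimal illustrative sketch (not from the original file) of the dictionary
# layout that _process_dependencies() above reads from ``_doctest_depends_on``.
# The keys are the ones the method checks; the concrete values here are only
# assumed examples.
_example_doctest_depends_on = {
    'exe': ['latex'],               # external executables that must be found on PATH
    'modules': ['matplotlib'],      # importable modules (matplotlib also gets a version check)
    'disable_viewers': ['evince'],  # viewers to replace with no-op stub scripts on PATH
    'pyglet': True,                 # monkey-patch pyglet.window.Window while doctesting
}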
class SymPyDocTestFinder(DocTestFinder):
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
Modified from doctest's version by looking harder for code in the
    case that it looks like the code comes from a different module.
    In the case of decorated functions (e.g. @vectorize) they appear
    to come from a different module (e.g. multidimensional) even though
their code is not there.
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to ``tests``.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don't run doctests for classes outside of sympy, such
# as in numpy or scipy.
if inspect.isclass(obj):
if obj.__module__.split('.')[0] != 'sympy':
return
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
if not self._recurse:
return
# Look for tests in a module's contained objects.
if inspect.ismodule(obj):
for rawname, val in obj.__dict__.items():
# Recurse to functions & classes.
if inspect.isfunction(val) or inspect.isclass(val):
# Make sure we don't run doctests functions or classes
# from different modules
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (rawname %s)" % (val, module, rawname)
try:
valname = '%s.%s' % (name, rawname)
self._find(tests, val, valname, module,
source_lines, globs, seen)
except KeyboardInterrupt:
raise
# Look for tests in a module's __test__ dictionary.
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, string_types):
raise ValueError("SymPyDocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, string_types)):
raise ValueError("SymPyDocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj):
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if (inspect.isfunction(val) or
inspect.isclass(val) or
isinstance(val, property)):
# Make sure we don't run doctests functions or classes
# from different modules
if isinstance(val, property):
if hasattr(val.fget, '__module__'):
if val.fget.__module__ != module.__name__:
continue
else:
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (valname %s)" % (
val, module, valname)
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
lineno = None
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, string_types):
# obj is a string in the case for objects in the polys package.
# Note that source_lines is a binary string (compiled polys
# modules), which can't be handled by _find_lineno so determine
# the line number here.
docstring = obj
matches = re.findall("line \d+", name)
assert len(matches) == 1, \
"string '%s' does not contain lineno " % name
            # NOTE: this is not the exact line number, but it's better than no
# lineno ;)
lineno = int(matches[0][5:])
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, string_types):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# check that properties have a docstring because _find_lineno
# assumes it
if isinstance(obj, property):
if obj.fget.__doc__ is None:
return None
# Find the docstring's location in the file.
if lineno is None:
# handling of properties is not implemented in _find_lineno so do
# it here
if hasattr(obj, 'func_closure') and obj.func_closure is not None:
tobj = obj.func_closure[0].cell_contents
elif isinstance(obj, property):
tobj = obj.fget
else:
tobj = obj
lineno = self._find_lineno(tobj, source_lines)
if lineno is None:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
if hasattr(obj, '_doctest_depends_on'):
globs['_doctest_depends_on'] = obj._doctest_depends_on
else:
globs['_doctest_depends_on'] = {}
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
"""
A class used to run DocTest test cases, and accumulate statistics.
The ``run`` method is used to process a single DocTest case. It
returns a tuple ``(f, t)``, where ``t`` is the number of test cases
tried, and ``f`` is the number of test cases that failed.
Modified from the doctest version to not reset the sys.displayhook (see
issue 5140).
See the docstring of the original DocTestRunner for more information.
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in ``test``, and display the results using the
writer function ``out``.
The examples are run in the namespace ``test.globs``. If
``clear_globs`` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use ``clear_globs=False``.
``compileflags`` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to ``globs``.
The output of each example is checked using
``SymPyDocTestRunner.check_output``, and the results are
formatted by the ``SymPyDocTestRunner.report_*`` methods.
"""
self.test = test
if compileflags is None:
compileflags = pdoctest._extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = pdoctest.linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
test.globs['print_function'] = print_function
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
# We have to override the name mangled methods.
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \
DocTestRunner._DocTestRunner__patched_linecache_getlines
SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run
SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \
DocTestRunner._DocTestRunner__record_outcome
class SymPyOutputChecker(pdoctest.OutputChecker):
"""
    Compared to the OutputChecker from the stdlib, our OutputChecker class
    supports numerical comparison of floats occurring in the output of the
doctest examples
"""
def __init__(self):
# NOTE OutputChecker is an old-style class with no __init__ method,
# so we can't call the base class version of __init__ here
got_floats = r'(\d+\.\d*|\.\d+)'
# floats in the 'want' string may contain ellipses
want_floats = got_floats + r'(\.{3})?'
front_sep = r'\s|\+|\-|\*|,'
back_sep = front_sep + r'|j|e'
fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep)
self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep)
self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the parsed floats contain
# ellipses, skip the comparison.
matches = self.num_got_rgx.finditer(got)
numbers_got = [match.group(1) for match in matches] # list of strs
matches = self.num_want_rgx.finditer(want)
numbers_want = [match.group(1) for match in matches] # list of strs
if len(numbers_got) != len(numbers_want):
return False
if len(numbers_got) > 0:
nw_ = []
for ng, nw in zip(numbers_got, numbers_want):
if '...' in nw:
nw_.append(ng)
continue
else:
nw_.append(nw)
if abs(float(ng)-float(nw)) > 1e-5:
return False
got = self.num_got_rgx.sub(r'%s', got)
got = got % tuple(nw_)
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & pdoctest.NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & pdoctest.ELLIPSIS:
if pdoctest._ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
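# A minimal, self-contained sketch (not part of the original module) of the
# numeric-comparison idea implemented by SymPyOutputChecker.check_output above:
# instead of demanding textual equality, floats are extracted from the expected
# and actual output and compared with an absolute tolerance of 1e-5. The real
# checker additionally handles ellipses and token boundaries; this version
# deliberately skips that.
_simple_float_rgx = re.compile(r'\d+\.\d*|\.\d+')

def _floats_agree(want, got, tol=1e-5):
    w = [float(m) for m in _simple_float_rgx.findall(want)]
    g = [float(m) for m in _simple_float_rgx.findall(got)]
    return len(w) == len(g) and all(abs(a - b) <= tol for a, b in zip(w, g))

# e.g. _floats_agree('pi = 3.14159', 'pi = 3.1415926535')  ->  True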
class Reporter(object):
"""
Parent class for all reporters.
"""
pass
class PyTestReporter(Reporter):
"""
Py.test like reporter. Should produce output identical to py.test.
"""
def __init__(self, verbose=False, tb="short", colors=True,
force_colors=False, split=None):
self._verbose = verbose
self._tb_style = tb
self._colors = colors
self._force_colors = force_colors
self._xfailed = 0
self._xpassed = []
self._failed = []
self._failed_doctest = []
self._passed = 0
self._skipped = 0
self._exceptions = []
self._terminal_width = None
self._default_width = 80
self._split = split
# this tracks the x-position of the cursor (useful for positioning
# things on the screen), without the need for any readline library:
self._write_pos = 0
self._line_wrap = False
def root_dir(self, dir):
self._root_dir = dir
@property
def terminal_width(self):
if self._terminal_width is not None:
return self._terminal_width
def findout_terminal_width():
if sys.platform == "win32":
# Windows support is based on:
#
# http://code.activestate.com/recipes/
# 440694-determine-size-of-console-window-on-windows/
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(_, _, _, _, _, left, _, right, _, _, _) = \
struct.unpack("hhhhHhhhhhh", csbi.raw)
return right - left
else:
return self._default_width
if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
return self._default_width # leave PIPEs alone
try:
process = subprocess.Popen(['stty', '-a'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = process.stdout.read()
if PY3:
stdout = stdout.decode("utf-8")
except (OSError, IOError):
pass
else:
# We support the following output formats from stty:
#
# 1) Linux -> columns 80
# 2) OS X -> 80 columns
# 3) Solaris -> columns = 80
re_linux = r"columns\s+(?P<columns>\d+);"
re_osx = r"(?P<columns>\d+)\s*columns;"
re_solaris = r"columns\s+=\s+(?P<columns>\d+);"
for regex in (re_linux, re_osx, re_solaris):
match = re.search(regex, stdout)
if match is not None:
columns = match.group('columns')
try:
width = int(columns)
except ValueError:
pass
if width != 0:
return width
return self._default_width
width = findout_terminal_width()
self._terminal_width = width
return width
def write(self, text, color="", align="left", width=None,
force_colors=False):
"""
Prints a text on the screen.
It uses sys.stdout.write(), so no readline library is necessary.
Parameters
==========
color : choose from the colors below, "" means default color
align : "left"/"right", "left" is a normal print, "right" is aligned on
the right-hand side of the screen, filled with spaces if
necessary
width : the screen width
"""
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
if width is None:
width = self.terminal_width
if align == "right":
if self._write_pos + len(text) > width:
# we don't fit on the current line, create a new line
self.write("\n")
self.write(" "*(width - self._write_pos - len(text)))
if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \
sys.stdout.isatty():
# the stdout is not a terminal, this for example happens if the
# output is piped to less, e.g. "bin/test | less". In this case,
# the terminal control sequences would be printed verbatim, so
# don't use any colors.
color = ""
elif sys.platform == "win32":
# Windows consoles don't support ANSI escape sequences
color = ""
elif not self._colors:
color = ""
if self._line_wrap:
if text[0] != "\n":
sys.stdout.write("\n")
# Avoid UnicodeEncodeError when printing out test failures
if PY3 and IS_WINDOWS:
text = text.encode('raw_unicode_escape').decode('utf8', 'ignore')
elif PY3 and not sys.stdout.encoding.lower().startswith('utf'):
text = text.encode(sys.stdout.encoding, 'backslashreplace'
).decode(sys.stdout.encoding)
if color == "":
sys.stdout.write(text)
else:
sys.stdout.write("%s%s%s" %
(c_color % colors[color], text, c_normal))
sys.stdout.flush()
l = text.rfind("\n")
if l == -1:
self._write_pos += len(text)
else:
self._write_pos = len(text) - l - 1
self._line_wrap = self._write_pos >= width
self._write_pos %= width
def write_center(self, text, delim="="):
width = self.terminal_width
if text != "":
text = " %s " % text
idx = (width - len(text)) // 2
t = delim*idx + text + delim*(width - idx - len(text))
self.write(t + "\n")
def write_exception(self, e, val, tb):
t = traceback.extract_tb(tb)
# remove the first item, as that is always runtests.py
t = t[1:]
t = traceback.format_list(t)
self.write("".join(t))
t = traceback.format_exception_only(e, val)
self.write("".join(t))
def start(self, seed=None, msg="test process starts"):
self.write_center(msg)
executable = sys.executable
v = tuple(sys.version_info)
python_version = "%s.%s.%s-%s-%s" % v
implementation = platform.python_implementation()
if implementation == 'PyPy':
implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info
self.write("executable: %s (%s) [%s]\n" %
(executable, python_version, implementation))
from .misc import ARCH
self.write("architecture: %s\n" % ARCH)
from sympy.core.cache import USE_CACHE
self.write("cache: %s\n" % USE_CACHE)
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
self.write("ground types: %s %s\n" % (GROUND_TYPES, version))
if seed is not None:
self.write("random seed: %d\n" % seed)
from .misc import HASH_RANDOMIZATION
self.write("hash randomization: ")
hash_seed = os.getenv("PYTHONHASHSEED") or '0'
if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)):
self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed)
else:
self.write("off\n")
if self._split:
self.write("split: %s\n" % self._split)
self.write('\n')
self._t_start = clock()
def finish(self):
self._t_end = clock()
self.write("\n")
global text, linelen
text = "tests finished: %d passed, " % self._passed
linelen = len(text)
def add_text(mytext):
global text, linelen
"""Break new text if too long."""
if linelen + len(mytext) > self.terminal_width:
text += '\n'
linelen = 0
text += mytext
linelen += len(mytext)
if len(self._failed) > 0:
add_text("%d failed, " % len(self._failed))
if len(self._failed_doctest) > 0:
add_text("%d failed, " % len(self._failed_doctest))
if self._skipped > 0:
add_text("%d skipped, " % self._skipped)
if self._xfailed > 0:
add_text("%d expected to fail, " % self._xfailed)
if len(self._xpassed) > 0:
add_text("%d expected to fail but passed, " % len(self._xpassed))
if len(self._exceptions) > 0:
add_text("%d exceptions, " % len(self._exceptions))
add_text("in %.2f seconds" % (self._t_end - self._t_start))
if len(self._xpassed) > 0:
self.write_center("xpassed tests", "_")
for e in self._xpassed:
self.write("%s: %s\n" % (e[0], e[1]))
self.write("\n")
if self._tb_style != "no" and len(self._exceptions) > 0:
for e in self._exceptions:
filename, f, (t, val, tb) = e
self.write_center("", "_")
if f is None:
s = "%s" % filename
else:
s = "%s:%s" % (filename, f.__name__)
self.write_center(s, "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed) > 0:
for e in self._failed:
filename, f, (t, val, tb) = e
self.write_center("", "_")
self.write_center("%s:%s" % (filename, f.__name__), "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed_doctest) > 0:
for e in self._failed_doctest:
filename, msg = e
self.write_center("", "_")
self.write_center("%s" % filename, "_")
self.write(msg)
self.write("\n")
self.write_center(text)
ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
len(self._failed_doctest) == 0
if not ok:
self.write("DO *NOT* COMMIT!\n")
return ok
def entering_filename(self, filename, n):
rel_name = filename[len(self._root_dir) + 1:]
self._active_file = rel_name
self._active_file_error = False
self.write(rel_name)
self.write("[%d] " % n)
def leaving_filename(self):
self.write(" ")
if self._active_file_error:
self.write("[FAIL]", "Red", align="right")
else:
self.write("[OK]", "Green", align="right")
self.write("\n")
if self._verbose:
self.write("\n")
def entering_test(self, f):
self._active_f = f
if self._verbose:
self.write("\n" + f.__name__ + " ")
def test_xfail(self):
self._xfailed += 1
self.write("f", "Green")
def test_xpass(self, v):
message = str(v)
self._xpassed.append((self._active_file, message))
self.write("X", "Green")
def test_fail(self, exc_info):
self._failed.append((self._active_file, self._active_f, exc_info))
self.write("F", "Red")
self._active_file_error = True
def doctest_fail(self, name, error_msg):
# the first line contains "******", remove it:
error_msg = "\n".join(error_msg.split("\n")[1:])
self._failed_doctest.append((name, error_msg))
self.write("F", "Red")
self._active_file_error = True
def test_pass(self, char="."):
self._passed += 1
if self._verbose:
self.write("ok", "Green")
else:
self.write(char, "Green")
def test_skip(self, v=None):
char = "s"
self._skipped += 1
if v is not None:
message = str(v)
if message == "KeyboardInterrupt":
char = "K"
elif message == "Timeout":
char = "T"
elif message == "Slow":
char = "w"
self.write(char, "Blue")
if self._verbose:
self.write(" - ", "Blue")
if v is not None:
self.write(message, "Blue")
def test_exception(self, exc_info):
self._exceptions.append((self._active_file, self._active_f, exc_info))
self.write("E", "Red")
self._active_file_error = True
def import_error(self, filename, exc_info):
self._exceptions.append((filename, None, exc_info))
rel_name = filename[len(self._root_dir) + 1:]
self.write(rel_name)
self.write("[?] Failed to import", "Red")
self.write(" ")
self.write("[FAIL]", "Red", align="right")
self.write("\n")
sympy_dir = get_sympy_dir()
|
bsd-3-clause
|
archangdcc/avalon-extras
|
farm-manager/status-report/hsplot.py
|
3
|
4305
|
#!/usr/bin/env python2
from __future__ import print_function
import os
import datetime
import sys
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()
import matplotlib
matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import numpy as np
def hsplot(hashrate, cfg, time0):
labels, vps, t = hashrate
if len(t) < 2:
print("More log files are needed for plotting.")
return 1
print("Plotting into " + cfg['HSplot']['img_dir'] + "hs-" +
time0.strftime("%Y_%m_%d_%H_%M") + ".png ... ", end="")
sys.stdout.flush()
for k in range(0, len(t)):
t[k] = t[k] / 3600.0
x = np.array(t)
ys = []
fs = []
for v in vps:
y = np.array(v)
ys.append(y)
fs.append(interp1d(x, y))
ymax = np.amax(np.hstack(ys))
xnew = np.linspace(t[0], t[-1], 1800)
fig = plt.figure(
figsize=(float(cfg['HSplot']['width']) / float(cfg['HSplot']['dpi']),
float(cfg['HSplot']['height']) / float(cfg['HSplot']['dpi'])),
dpi=int(cfg['HSplot']['dpi']),
facecolor="white")
titlefont = {'family': cfg['HSplot']['font_family1'],
'weight': 'normal',
'size': int(cfg['HSplot']['font_size1'])}
ticks_font = matplotlib.font_manager.FontProperties(
family=cfg['HSplot']['font_family2'], style='normal',
size=int(cfg['HSplot']['font_size2']), weight='normal',
stretch='normal')
colorlist = ['b-', 'c-', 'g-', 'r-', 'y-', 'm-']
plots = []
for i in range(0, len(vps)):
p, = plt.plot(xnew, fs[i](xnew), colorlist[i])
plots.append(p)
plt.legend(plots, labels, loc=2, prop=ticks_font)
# x axis tick label
xticklabel = []
xmax = time0 - datetime.timedelta(
seconds=(time0.hour - (time0.hour / 2) * 2) * 3600 + time0.minute * 60)
xmin = xmax
xticklabel.append(xmin.strftime("%H:%M"))
for i in range(0, 12):
xmin = xmin - datetime.timedelta(seconds=7200)
xticklabel.append(xmin.strftime("%H:%M"))
xticklabel = xticklabel[::-1]
# y axis tick label
ymax_s = str(int(ymax))
flag = int(ymax_s[0])
yticklabel = ['0']
if flag == 1:
# 0.1;0.2;0.3....
ystep = 1 * (10 ** (len(ymax_s) - 2))
ylim = int(ymax + ystep - 1) / ystep * ystep
for i in range(1, int(ylim / ystep)):
yticklabel.append("{:,}".format(i * (10 ** (len(ymax_s) - 2))))
elif flag >= 2 and flag <= 3:
# 0.2;0.4;0.6...
ystep = 2 * (10 ** (len(ymax_s) - 2))
ylim = int(ymax + ystep - 1) / ystep * ystep
for i in range(1, int(ylim / ystep)):
yticklabel.append("{:,}".format(i * 2 * (10 ** (len(ymax_s) - 2))))
elif flag >= 4 and flag <= 6:
# 0.25;0.50;0.75...
ystep = 25*(10**(len(ymax_s)-3))
ylim = int(ymax + ystep - 1) / ystep * ystep
for i in range(1, int(ylim / ystep)):
yticklabel.append("{:,}".format(i * 25 * (10 ** (len(ymax_s) - 3))))
else:
# 0.5;1.0;1.5...
ystep = 5 * (10 ** (len(ymax_s) - 2))
ylim = int(ymax + ystep - 1) / ystep * ystep
for i in range(1, int(ylim / ystep)):
yticklabel.append("{:,}".format(i * 5 * (10 ** (len(ymax_s) - 2))))
ax = plt.gca()
ax.set_xticks(np.linspace((xmin-time0).total_seconds() / 3600.0,
(xmax-time0).total_seconds() / 3600.0, 13))
ax.set_xticklabels(tuple(xticklabel))
ax.set_yticks(np.linspace(0, ylim - ystep, len(yticklabel)))
ax.set_yticklabels(tuple(yticklabel))
ax.tick_params(tick1On=False, tick2On=False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(cfg['HSplot']['title'], fontdict=titlefont)
for label in ax.get_xticklabels():
label.set_fontproperties(ticks_font)
for label in ax.get_yticklabels():
label.set_fontproperties(ticks_font)
plt.axis([-24, 0, 0, ylim])
plt.grid(color='0.75', linestyle='-')
plt.tight_layout()
plt.savefig(cfg['HSplot']['img_dir'] + "hs-" +
time0.strftime("%Y_%m_%d_%H_%M") + ".png")
print("Done.")
plt.clf()
return "hs-"+time0.strftime("%Y_%m_%d_%H_%M")+".png"
|
unlicense
|
Imtinan1996/autonomous-vehicle-modules
|
lane-detection-module/lane-detection.py
|
1
|
2889
|
import numpy as np
import cv2
import pyautogui
import matplotlib.pyplot as plt
#area_to_remove=np.array([[100,474],[100,375],[300,200],[520,200],[800,375],[800,475]]) # for driving segment
area_to_remove=np.array([[250,650],[450,500],[850,500],[1050,650]]) # for youtube video
def draw_lanes(image,lanes):
try:
for line in lanes:
coordinates=line
            if len(coordinates) != 0:
cv2.line(image,(coordinates[0],coordinates[1]),(coordinates[2],coordinates[3]),[0,255,0],10)
except:
pass
return image
def mask_area(image):
mask=np.zeros_like(image)
cv2.fillPoly(mask,[area_to_remove],255)
masked_img=cv2.bitwise_and(image,mask)
return masked_img
def blur_image(image):
return cv2.GaussianBlur(image,(5,5),0)
def edge_convert(image):
processed_img=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
processed_img=cv2.Canny(processed_img,threshold1=300,threshold2=400)
return processed_img
def detect_lines(image):
image=blur_image(image)
detected_lines=cv2.HoughLinesP(image,1,np.pi/180,180,np.array([]),100,100)
gradients=[]
try:
for line in detected_lines:
coordinates=line[0]
cv2.line(image,(coordinates[0],coordinates[1]),(coordinates[2],coordinates[3]),[255,255,255],3)
gradients.append(((coordinates[3]-coordinates[1])/(coordinates[2]-coordinates[0])))
except:
pass
return image,detected_lines,gradients
def findLanes(videoName):
video=cv2.VideoCapture(videoName)
line1=[]
line2=[]
while True:
        ret,frame=video.read()
        if ret is False:
cv2.destroyAllWindows()
video.release()
break
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
video.release()
break
cv2.imshow('driving-video',frame)
edges=edge_convert(frame)
cv2.imshow('canny-edges',edges)
roi=mask_area(frame)
cv2.imshow('removed unwanted area',roi)
roi_edges=mask_area(edges)
cv2.imshow('roi edges',roi_edges)
lines,lanes,gradients=detect_lines(roi_edges)
cv2.imshow('lines edges',lines)
for i in range(len(gradients)):
if gradients[i]>0.5: #LINE 2
#print("right line found with coordinates",lanes[i][0],"and gradients",gradients[i])
line2=lanes[i][0]
elif gradients[i]<-0.5: #line 1
#print("left line found with coordinates",lanes[i][0],"and gradients",gradients[i])
line1=lanes[i][0]
frame_with_lanes=draw_lanes(frame,[line1,line2])
cv2.imshow('the lanes',frame_with_lanes)
#print("gradients",gradients)
findLanes('sample-videos/test-from-youtube.mp4')
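# A minimal standalone sketch (not part of the original script) of the slope
# based split performed inside the loop of findLanes() above: segments returned
# by cv2.HoughLinesP with gradient > 0.5 are taken as the right lane marking,
# gradient < -0.5 as the left one, flatter segments are ignored as noise, and
# the last match on each side wins.
def classify_lanes(detected_lines, gradients, threshold=0.5):
    left, right = [], []
    for line, g in zip(detected_lines, gradients):
        if g > threshold:
            right = line[0]      # [x1, y1, x2, y2] as returned by cv2.HoughLinesP
        elif g < -threshold:
            left = line[0]
    return left, right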
|
mit
|
gregorian72/suckless_offsec
|
pi_sensor_scripts/ultrashall_test.py
|
1
|
15302
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys, traceback
import datetime
import numpy as np
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import time
import RPi.GPIO as GPIO
import RPi.GPIO as GPIO2
import RPi.GPIO as GPIO3
import RPi.GPIO as GPIO4
from subprocess import call
# define GPIO pins for the ultrasonic sensors
GPIOTrigger = 17 # I
GPIOEcho = 18 # I
GPIO2Trigger = 22 # II
GPIO2Echo = 23 # II
GPIO3Trigger = 24 # III
GPIO3Echo = 25 # III
max_oel = 620
minutes = mdates.MinuteLocator() # every minute
hours = mdates.HourLocator() # every hour
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
#yearsFmt = mdates.DateFormatter('%Y')
yearsFmt = mdates.DateFormatter('%Y-%m-%d_%H:%M')
##################################################################################
# The wiring for the LCD is as follows:
# 1 : GND
# 2 : 5V
# 3 : Contrast (0-5V)*
# 4 : RS (Register Select)
# 5 : R/W (Read Write) - GROUND THIS PIN
# 6 : Enable or Strobe
# 7 : Data Bit 0 - NOT USED
# 8 : Data Bit 1 - NOT USED
# 9 : Data Bit 2 - NOT USED
# 10: Data Bit 3 - NOT USED
# 11: Data Bit 4
# 12: Data Bit 5
# 13: Data Bit 6
# 14: Data Bit 7
# 15: LCD Backlight +5V**
# 16: LCD Backlight GND
# Define GPIO to LCD mapping
LCD_RS = 5
LCD_E = 12
LCD_D4 = 6
LCD_D5 = 13
LCD_D6 = 19
LCD_D7 = 26
# Define some device constants
LCD_WIDTH = 20 # Maximum characters per line
LCD_CHR = True
LCD_CMD = False
LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
LCD_LINE_3 = 0x94 # LCD RAM address for the 3rd line
LCD_LINE_4 = 0xD4 # LCD RAM address for the 4th line
# Timing constants
E_PULSE = 0.0005
E_DELAY = 0.0005
#####################################################################
def lcd_init():
# Initialise display
lcd_byte(0x33,LCD_CMD) # 110011 Initialise
lcd_byte(0x32,LCD_CMD) # 110010 Initialise
lcd_byte(0x06,LCD_CMD) # 000110 Cursor move direction
lcd_byte(0x0C,LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
lcd_byte(0x28,LCD_CMD) # 101000 Data length, number of lines, font size
lcd_byte(0x01,LCD_CMD) # 000001 Clear display
time.sleep(E_DELAY)
def lcd_byte(bits, mode):
# Send byte to data pins
# bits = data
# mode = True for character
# False for command
GPIO3.output(LCD_RS, mode) # RS
# High bits
GPIO3.output(LCD_D4, False)
GPIO3.output(LCD_D5, False)
GPIO3.output(LCD_D6, False)
GPIO3.output(LCD_D7, False)
if bits&0x10==0x10:
GPIO3.output(LCD_D4, True)
if bits&0x20==0x20:
GPIO3.output(LCD_D5, True)
if bits&0x40==0x40:
GPIO3.output(LCD_D6, True)
if bits&0x80==0x80:
GPIO3.output(LCD_D7, True)
# Toggle 'Enable' pin
lcd_toggle_enable()
# Low bits
GPIO3.output(LCD_D4, False)
GPIO3.output(LCD_D5, False)
GPIO3.output(LCD_D6, False)
GPIO3.output(LCD_D7, False)
if bits&0x01==0x01:
GPIO3.output(LCD_D4, True)
if bits&0x02==0x02:
GPIO3.output(LCD_D5, True)
if bits&0x04==0x04:
GPIO3.output(LCD_D6, True)
if bits&0x08==0x08:
GPIO3.output(LCD_D7, True)
# Toggle 'Enable' pin
lcd_toggle_enable()
def lcd_toggle_enable():
# Toggle enable
time.sleep(E_DELAY)
GPIO3.output(LCD_E, True)
time.sleep(E_PULSE)
GPIO3.output(LCD_E, False)
time.sleep(E_DELAY)
def lcd_string(message,line,style):
# Send string to display
# style=1 Left justified
# style=2 Centred
# style=3 Right justified
if style==1:
message = message.ljust(LCD_WIDTH," ")
elif style==2:
message = message.center(LCD_WIDTH," ")
elif style==3:
message = message.rjust(LCD_WIDTH," ")
lcd_byte(line, LCD_CMD)
for i in range(LCD_WIDTH):
lcd_byte(ord(message[i]),LCD_CHR)
#############################################
def tail_last_lines():
cmd = ["tail","-n300","/home/pi/datafile.csv"]
fh = open("/home/pi/datafile_last_week.csv","wb")
call(cmd,stdout=fh)
fh.close()
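# A minimal pure-Python sketch (not part of the original script) of the same
# "keep only the last 300 lines" step that tail_last_lines() above delegates to
# the external `tail` command; the paths mirror the ones hard-coded above.
def tail_last_lines_py(src="/home/pi/datafile.csv",
                       dst="/home/pi/datafile_last_week.csv", n=300):
    from collections import deque
    with open(src) as fin:
        last = deque(fin, maxlen=n)   # holds only the final n lines in memory
    with open(dst, "w") as fout:
        fout.writelines(last)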
def render_data(datasource_file, picture_dest_file_and_path, titel):
print("##### Render started:", datasource_file)
days, oel_t1 = np.loadtxt(datasource_file, delimiter = ';', usecols=(0,3),unpack=True, converters={ 0: mdates.strpdate2num('%Y-%m-%d_%H:%M')})
days, oel_t2 = np.loadtxt(datasource_file, delimiter = ';', usecols=(0,6),unpack=True, converters={ 0: mdates.strpdate2num('%Y-%m-%d_%H:%M')})
days, oel_t3 = np.loadtxt(datasource_file, delimiter = ';', usecols=(0,9),unpack=True, converters={ 0: mdates.strpdate2num('%Y-%m-%d_%H:%M')})
days, temp_s1= np.loadtxt(datasource_file, delimiter = ';', usecols=(0,10),unpack=True, converters={ 0: mdates.strpdate2num('%Y-%m-%d_%H:%M')})
####### GRAPH 1 #############
plt.clf()
plt.subplot(2, 1, 1)
plt.subplots_adjust(hspace = .001)
plt.tick_params(labelright=True, labelbottom=False)
plt.plot_date(x=days, y=oel_t1, fmt="r-")
plt.plot_date(x=days, y=oel_t2, fmt="g-")
plt.plot_date(x=days, y=oel_t3, fmt="b-")
axes = plt.gca()
axes.set_ylim([0,800])
plt.yticks([0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800])
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d_%H:%M'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator())
#plt.gca().xaxis.set_major_locator(mdates.MonthLocator())
plt.xticks(rotation=90)
plt.tick_params(axis='both', which='major', labelsize=5)
plt.title(titel)
plt.xlabel('Zeit')
plt.ylabel('Liter')
plt.grid(b=True, linestyle='-', which='both', linewidth=1, alpha=0.3)
####### GRAPH 2 #############
plt.subplot(2, 1, 2)
plt.subplots_adjust(hspace = .001)
plt.tick_params(labelright=True)
plt.xticks(rotation=90)
plt.tick_params(axis='both', which='major', labelsize=5)
plt.plot_date(x=days, y=temp_s1, fmt="r-")
plt.yticks([-10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45])
plt.grid(b=True, linestyle='-', which='both', linewidth=1, alpha=0.3)
plt.xlabel('Zeit')
plt.ylabel('Temp.')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d_%H:%M'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator())
# plt.savefig(picture_dest_file_and_path, dpi=150) # save the figure to file
# plt.close(plt) # close the figure
print("##### Render completed")
############################################
def update_display(t1_cm, t2_cm, t3_cm, t1_liter, t2_liter, t3_liter):
lcd_init()
zeit = ""
zeit = time.strftime("%Y-%m-%d_%H:%M")
lcd_string(str(zeit),LCD_LINE_1,1)
# lcd_string("TANK 1 (cm):"+str(t1_cm),LCD_LINE_2,1)
lcd_string("TANK 1 (L ):"+str(t1_liter),LCD_LINE_2,1)
# lcd_string("TANK 2 (cm):"+str(t2_cm),LCD_LINE_3,1)
lcd_string("TANK 2 (L ):"+str(t2_liter),LCD_LINE_3,1)
# lcd_string("TANK 3 (cm):"+str(t3_cm),LCD_LINE_4,1)
lcd_string("TANK 3 (L ):"+str(t3_liter),LCD_LINE_4,1)
# lcd_string(" TANK 1 | TANK 2",LCD_LINE_2,1)
# lcd_string("CM: "+'{:<5}'.format(str(t1_cm)) +" | "+ str(t2_cm) ,LCD_LINE_3,1)
# lcd_string("L : "+'{:<5}'.format(str(t1_liter))+" | "+ str(t2_liter) ,LCD_LINE_4,1)
############################################
def update_webserver(t1_cm, t2_cm, t3_cm, t1_liter, t2_liter, t3_liter):
zeit = time.strftime("%Y-%m-%d_%H:%M")
line0 = str(zeit) + "\n"
line1 = " TANK 1 | TANK 2\n"
line2 = "CM: "+'{:<5}'.format(str(t1_cm)) +" | "+ str(t2_cm) + "\n"
line3 = "L : "+'{:<5}'.format(str(t1_liter))+" | "+ str(t2_liter) + "\n"
line1 = "Tank 1: L : "+'{:<5}'.format(str(t1_liter))+"\n"
line2 = "Tank 1:CM : "+'{:<5}'.format(str(t1_cm))+"\n"
line3 = "Tank 2: L : "+'{:<5}'.format(str(t2_liter))+"\n"
line4 = "Tank 2:CM : "+'{:<5}'.format(str(t2_cm))+"\n"
line5 = "Tank 3: L : "+'{:<5}'.format(str(t3_liter))+"\n"
line6 = "Tank 3:CM : "+'{:<5}'.format(str(t3_cm))+"\n"
text = line0 + line1 +line2 + line3 + line4 + line5 +line6
return text
############################################
def get_temperatur_s1():
tempfile = open("/sys/bus/w1/devices/28-000005da3366/w1_slave")
thetext = tempfile.read()
tempfile.close()
#tempdata = thetext.split("\n")[1].split(" ")[9]
tempdata = thetext.split("\n")[1].split("=")[1]
#print "tempdate:" + tempdata
#temperatur = float(tempdata[2:])
temperatur = float(tempdata)
temperatur = temperatur / 1000
print "temperatur:" + str(temperatur)
return temperatur
# function to measure the distance
def MeasureDistance():
# set trigger to high
GPIO.output(GPIOTrigger, True)
# set trigger after 10µs to low
time.sleep(0.00001)
GPIO.output(GPIOTrigger, False)
# store initial start time
StartTime = time.time()
StopTime = time.time()
# store start time
while GPIO.input(GPIOEcho) == 0:
StartTime = time.time()
# store stop time
while GPIO.input(GPIOEcho) == 1:
StopTime = time.time()
# calculate distance
TimeElapsed = StopTime - StartTime
Distance = (TimeElapsed * 34300) / 2
print("%.2f" % (Distance))
return Distance
##################################
def MeasureDistance2():
# set trigger to high
GPIO2.output(GPIO2Trigger, True)
# set trigger after 10µs to low
time.sleep(0.00001)
GPIO2.output(GPIO2Trigger, False)
# store initial start time
StartTime = time.time()
StopTime = time.time()
# store start time
while GPIO2.input(GPIO2Echo) == 0:
StartTime = time.time()
# store stop time
while GPIO2.input(GPIO2Echo) == 1:
StopTime = time.time()
# calculate distance
TimeElapsed = StopTime - StartTime
Distance = (TimeElapsed * 34300) / 2
print("%.2f" % (Distance))
return Distance
########################################
def MeasureDistance3():
# set trigger to high
GPIO4.output(GPIO3Trigger, True)
# set trigger after 10µs to low
time.sleep(0.00001)
GPIO4.output(GPIO3Trigger, False)
# store initial start time
StartTime = time.time()
StopTime = time.time()
# store start time
while GPIO4.input(GPIO3Echo) == 0:
StartTime = time.time()
# store stop time
while GPIO4.input(GPIO3Echo) == 1:
StopTime = time.time()
# calculate distance
TimeElapsed = StopTime - StartTime
Distance = (TimeElapsed * 34300) / 2
print("%.2f" % (Distance))
return Distance
########################################
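# A minimal parameterized sketch (not part of the original script) of the
# measurement that MeasureDistance, MeasureDistance2 and MeasureDistance3
# above repeat per sensor: send a 10 microsecond trigger pulse, time how long
# the echo pin stays high, and convert the round trip at roughly 34300 cm/s
# into a one-way distance in cm.
def measure_distance_on(gpio, trigger_pin, echo_pin):
    gpio.output(trigger_pin, True)
    time.sleep(0.00001)                 # 10 microsecond trigger pulse
    gpio.output(trigger_pin, False)
    start_time = time.time()
    stop_time = time.time()
    while gpio.input(echo_pin) == 0:    # wait for the echo line to go high
        start_time = time.time()
    while gpio.input(echo_pin) == 1:    # measure how long it stays high
        stop_time = time.time()
    return ((stop_time - start_time) * 34300) / 2

# e.g. measure_distance_on(GPIO, GPIOTrigger, GPIOEcho)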
# main function
def main():
print("Start.")
zeit = time.strftime("%Y-%m-%d_%H:%M")
try:
print("Messe Sensor 1")
Distance = 0
Distance = MeasureDistance()
print("1 Fertig")
time.sleep(1)
print("Messe Sensor 2")
Distance2 = 0
Distance2 = MeasureDistance2()
print("2 Fertig")
print("Messe Sensor 3")
Distance3 = 0
Distance3 = MeasureDistance3()
print("3 Fertig")
print("Messe Temp. Sensor S1")
temperatur = 0
temperatur = get_temperatur_s1()
print("--- Messungen Fertig ---")
        # Subtract about 4 cm, because the tank can never be filled completely
        ##Distance = Distance -4
        # 6 decimetres * 8 decimetres * height/10
        # since 10*10*10 decimetres = 1 litre
        # Max 620 litres
        # Volume =
LuftVolumen = 7 * 9 * round(Distance,0) * 0.1
LuftVolumen2 = 7 * 9 * round(Distance2,0) * 0.1
LuftVolumen3 = 7.5 * 7.5 * round(Distance3,0) * 0.1
OelVolumen = 662 - LuftVolumen
OelVolumen2 = 662 - LuftVolumen2
OelVolumen3 = 760 - LuftVolumen3
liter = LuftVolumen - 620
zeit = ""
zeit = time.strftime("%Y-%m-%d_%H:%M")
print("messe:Zeit,\t\t Distanz, Oelvolumen in Liter, Temp.")
print("t1 %s\t\t%3.2f %3.0f\t\t %3.0f" % (zeit, Distance, OelVolumen, temperatur))
print("t2 %s\t\t%3.2f %3.0f\t\t %3.0f" % (zeit, Distance2, OelVolumen2, temperatur))
print("t3 %s\t\t%3.2f %3.0f\t\t %3.0f" % (zeit, Distance3, OelVolumen3, temperatur))
datastring = "%s;%.2f;%.0f;%.0f;%.2f;%.0f;%.0f;%.2f;%.0f;%.0f;%.1f\n" % (zeit, Distance, LuftVolumen, OelVolumen,Distance2, LuftVolumen2, OelVolumen2, Distance3, LuftVolumen3, OelVolumen3, temperatur)
update_display(round(Distance,1), round(Distance2,1), round(Distance3,1), round(OelVolumen,1), round(OelVolumen2,1), round (OelVolumen3,1))
print("Display Updated")
text = update_webserver(round(Distance,1), round(Distance2,1), round(Distance3,1), round(OelVolumen,1), round(OelVolumen2,1), round(OelVolumen3,1))
print("Webserver Updated")
fh = open('/home/pi/datafile.csv', 'a')
fh.write(datastring)
        fh.close()
print("Saved to file.")
#fh2 = open('/var/www/html/stand.txt', 'w')
#fh2.write(text)
#fh2.close
tail_last_lines()
render_data("/home/pi/datafile_last_week.csv","/var/www/html/stand7.png","Oelstand letzten 7 Tage")
render_data("/home/pi/datafile.csv","/var/www/html/stand.png", "Oelstand gesamt")
print("Render complete.")
#time.sleep(600)
except Exception as e:
errorlog = '/home/pi/error.log'
error_string = "Exception on %s . Measurement stopped. Testprog. \n" % (zeit)
fh3 = open(errorlog, 'a')
fh3.write(error_string)
fh3.write('-'*60 + '\n')
traceback.print_exc(file=fh3)
fh3.write('-'*60 + '\n')
        fh3.close()
GPIO.cleanup()
#GPIO2.cleanup()
def sensortest():
# lcd_string("Teste Sensor 1...",LCD_LINE_1,1)
lcd_string("Sensor Test...",LCD_LINE_1,1)
Distance = MeasureDistance()
lcd_string("T1: "+str(round(Distance,1))+" CM = OK" ,LCD_LINE_2,1)
time.sleep(1)
# lcd_string("Teste Sensor 2...",LCD_LINE_3,1)
Distance2 = MeasureDistance2()
lcd_string("T2: "+str(round(Distance2,1))+" CM = OK" ,LCD_LINE_3,1)
time.sleep(1)
Distance3 = MeasureDistance3()
lcd_string("T3: "+str(round(Distance3,1))+" CM = OK" ,LCD_LINE_4,1)
time.sleep(5)
if __name__ == '__main__':
while True:
GPIO.cleanup()
# use GPIO pin numbering convention
GPIO.setmode(GPIO.BCM)
GPIO2.setmode(GPIO2.BCM)
GPIO3.setmode(GPIO3.BCM)
        GPIO4.setmode(GPIO4.BCM)
# Main program block
GPIO3.setmode(GPIO3.BCM) # Use BCM GPIO numbers
GPIO3.setup(LCD_E, GPIO3.OUT) # E
GPIO3.setup(LCD_RS, GPIO3.OUT) # RS
GPIO3.setup(LCD_D4, GPIO3.OUT) # DB4
GPIO3.setup(LCD_D5, GPIO3.OUT) # DB5
GPIO3.setup(LCD_D6, GPIO3.OUT) # DB6
GPIO3.setup(LCD_D7, GPIO3.OUT) # DB7
# Initialise display
lcd_init()
# set up GPIO pins
# lcd_string("Teste Sensor 1...",LCD_LINE_1,1)
GPIO.setup(GPIOTrigger, GPIO.OUT)
GPIO.setup(GPIOEcho, GPIO.IN)
# lcd_string("OK",LCD_LINE_2,1)
#lcd_string("Teste Sensor 2...",LCD_LINE_3,1)
GPIO2.setup(GPIO2Trigger, GPIO2.OUT)
GPIO2.setup(GPIO2Echo, GPIO2.IN)
# lcd_string("OK",LCD_LINE_4,1)
GPIO4.setup(GPIO3Trigger, GPIO4.OUT)
GPIO4.setup(GPIO3Echo, GPIO4.IN)
# set trigger to false
lcd_string("Starte Programm",LCD_LINE_1,1)
lcd_string("",LCD_LINE_2,1)
lcd_string("",LCD_LINE_3,1)
lcd_string("",LCD_LINE_4,1)
#GPIO2.output(GPIO2Trigger, False)
# call main function
#try:
sensortest()
main()
print("Run complete.")
time.sleep(1800)
#except KeyboardInterrupt:
# pass
#finally:
# lcd_byte(0x01, LCD_CMD)
# lcd_string("Program beendet!",LCD_LINE_1,2)
# lcd_string("Tschuldigung!",LCD_LINE_2,2)
|
gpl-3.0
|
courtarro/gnuradio
|
gr-fec/python/fec/polar/channel_construction_awgn.py
|
24
|
8560
|
#!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Based on 2 papers:
[1] Ido Tal, Alexander Vardy: 'How To Construct Polar Codes', 2013
for an in-depth description of a widely used algorithm for channel construction.
[2] Harish Vangala, Emanuele Viterbo, Yi Hong: 'A Comparative Study of Polar Code Constructions for the AWGN Channel', 2015
for an overview of different approaches
'''
from scipy.optimize import fsolve
from scipy.special import erfc
from helper_functions import *
from channel_construction_bec import bhattacharyya_bounds
def solver_equation(val, s):
cw_lambda = codeword_lambda_callable(s)
ic_lambda = instantanious_capacity_callable()
return lambda y: ic_lambda(cw_lambda(y)) - val
def solve_capacity(a, s):
eq = solver_equation(a, s)
res = fsolve(eq, 1)
return np.abs(res[0]) # only positive values needed.
def codeword_lambda_callable(s):
return lambda y: np.exp(-2 * y * np.sqrt(2 * s))
def codeword_lambda(y, s):
return codeword_lambda_callable(s)(y)
def instantanious_capacity_callable():
return lambda x : 1 - np.log2(1 + x) + (x * np.log2(x) / (1 + x))
def instantanious_capacity(x):
return instantanious_capacity_callable()(x)
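# A small sanity check (not part of the original module) of the instantaneous
# capacity expression above, C(x) = 1 - log2(1 + x) + x*log2(x)/(1 + x):
# an uninformative likelihood ratio x = 1 gives capacity 0, while an extreme
# ratio x -> 0 drives the capacity towards 1.
def _capacity_endpoint_check():
    assert abs(instantanious_capacity(1.0)) < 1e-12
    assert abs(instantanious_capacity(1e-12) - 1.0) < 1e-9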
def q_function(x):
# Q(x) = (1 / sqrt(2 * pi) ) * integral (x to inf) exp(- x ^ 2 / 2) dx
return .5 * erfc(x / np.sqrt(2))
def discretize_awgn(mu, design_snr):
'''
    Needed for binary-input AWGN channels.
    In [1] this is described in Section VI;
    in [2] it is described as a function of the same name.
    In both cases an infinite output alphabet is reduced to a finite output alphabet of the given channel.
    Idea:
    1. instantaneous capacity C(x) in interval [0, 1]
    2. split into mu intervals.
    3. find corresponding output alphabet values y of the likelihood ratio function lambda(y) inserted into C(x)
    4. calculate the probability for each value given that a '0' or '1' was transmitted.
'''
s = 10 ** (design_snr / 10)
a = np.zeros(mu + 1, dtype=float)
a[-1] = np.inf
for i in range(1, mu):
a[i] = solve_capacity(1. * i / mu, s)
factor = np.sqrt(2 * s)
tpm = np.zeros((2, mu))
for j in range(mu):
tpm[0][j] = q_function(factor + a[j]) - q_function(factor + a[j + 1])
tpm[1][j] = q_function(-1. * factor + a[j]) - q_function(-1. * factor + a[j + 1])
tpm = tpm[::-1]
tpm[0] = tpm[0][::-1]
tpm[1] = tpm[1][::-1]
return tpm
def instant_capacity_delta_callable():
return lambda a, b: -1. * (a + b) * np.log2((a + b) / 2) + a * np.log2(a) + b * np.log2(b)
def capacity_delta_callable():
c = instant_capacity_delta_callable()
return lambda a, b, at, bt: c(a, b) + c(at, bt) - c(a + at, b + bt)
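# A small numeric sanity check (not part of the original module) of the
# capacity-loss term above: merging two output symbols with identical
# probability pairs (a, b) costs exactly zero capacity, which is why the
# degrading merge in quantize_to_size() below can collapse adjacent duplicate
# symbols for free.
def _zero_cost_merge_check(a=0.3, b=0.1):
    delta_capacity = capacity_delta_callable()
    assert abs(delta_capacity(a, b, a, b)) < 1e-12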
def quantize_to_size(tpm, mu):
# This is a degrading merge, compare [1]
calculate_delta_I = capacity_delta_callable()
L = np.shape(tpm)[1]
if not mu < L:
print('WARNING: This channel gets too small!')
# lambda works on vectors just fine. Use Numpy vector awesomeness.
delta_i_vec = calculate_delta_I(tpm[0, 0:-1], tpm[1, 0:-1], tpm[0, 1:], tpm[1, 1:])
for i in range(L - mu):
d = np.argmin(delta_i_vec)
ap = tpm[0, d] + tpm[0, d + 1]
bp = tpm[1, d] + tpm[1, d + 1]
if d > 0:
delta_i_vec[d - 1] = calculate_delta_I(tpm[0, d - 1], tpm[1, d - 1], ap, bp)
if d < delta_i_vec.size - 1:
delta_i_vec[d + 1] = calculate_delta_I(ap, bp, tpm[0, d + 1], tpm[1, d + 1])
delta_i_vec = np.delete(delta_i_vec, d)
tpm = np.delete(tpm, d, axis=1)
tpm[0, d] = ap
tpm[1, d] = bp
return tpm
def upper_bound_z_params(z, block_size, design_snr):
upper_bound = bhattacharyya_bounds(design_snr, block_size)
z = np.minimum(z, upper_bound)
return z
def tal_vardy_tpm_algorithm(block_size, design_snr, mu):
mu = mu // 2 # make sure algorithm uses only as many bins as specified.
block_power = power_of_2_int(block_size)
channels = np.zeros((block_size, 2, mu))
channels[0] = discretize_awgn(mu, design_snr) * 2
print('Constructing polar code with Tal-Vardy algorithm')
    print('(block_size = {0}, design SNR = {1}, mu = {2})'.format(block_size, design_snr, 2 * mu))
show_progress_bar(0, block_size)
for j in range(0, block_power):
u = 2 ** j
for t in range(u):
show_progress_bar(u + t, block_size)
# print("(u={0}, t={1}) = {2}".format(u, t, u + t))
ch1 = upper_convolve(channels[t], mu)
ch2 = lower_convolve(channels[t], mu)
channels[t] = quantize_to_size(ch1, mu)
channels[u + t] = quantize_to_size(ch2, mu)
z = np.zeros(block_size)
for i in range(block_size):
z[i] = bhattacharyya_parameter(channels[i])
z = z[bit_reverse_vector(np.arange(block_size), block_power)]
z = upper_bound_z_params(z, block_size, design_snr)
show_progress_bar(block_size, block_size)
print('')
print('channel construction DONE')
return z
def merge_lr_based(q, mu):
lrs = q[0] / q[1]
vals, indices, inv_indices = np.unique(lrs, return_index=True, return_inverse=True)
# compare [1] (20). Ordering of representatives according to LRs.
temp = np.zeros((2, len(indices)), dtype=float)
if vals.size < mu:
return q
for i in range(len(indices)):
merge_pos = np.where(inv_indices == i)[0]
sum_items = q[:, merge_pos]
if merge_pos.size > 1:
sum_items = np.sum(q[:, merge_pos], axis=1)
temp[0, i] = sum_items[0]
temp[1, i] = sum_items[1]
return temp
def upper_convolve(tpm, mu):
q = np.zeros((2, mu ** 2))
idx = -1
for i in range(mu):
idx += 1
q[0, idx] = (tpm[0, i] ** 2 + tpm[1, i] ** 2) / 2
q[1, idx] = tpm[0, i] * tpm[1, i]
for j in range(i + 1, mu):
idx += 1
q[0, idx] = tpm[0, i] * tpm[0, j] + tpm[1, i] * tpm[1, j]
q[1, idx] = tpm[0, i] * tpm[1, j] + tpm[1, i] * tpm[0, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
q = merge_lr_based(q, mu)
q = normalize_q(q, tpm)
return q
def lower_convolve(tpm, mu):
q = np.zeros((2, mu * (mu + 1)))
idx = -1
for i in range(0, mu):
idx += 1
q[0, idx] = (tpm[0, i] ** 2) / 2
q[1, idx] = (tpm[1, i] ** 2) / 2
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q[0, idx] = tpm[0, i] * tpm[1, i]
q[1, idx] = q[0, idx]
for j in range(i + 1, mu):
idx += 1
q[0, idx] = tpm[0, i] * tpm[0, j]
q[1, idx] = tpm[1, i] * tpm[1, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q[0, idx] = tpm[0, i] * tpm[1, j]
q[1, idx] = tpm[1, i] * tpm[0, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
q = merge_lr_based(q, mu)
q = normalize_q(q, tpm)
return q
def swap_values(first, second):
return second, first
def normalize_q(q, tpm):
original_factor = np.sum(tpm)
next_factor = np.sum(q)
factor = original_factor / next_factor
return q * factor
def main():
    print('channel construction AWGN main')
n = 8
m = 2 ** n
design_snr = 0.0
mu = 16
z_params = tal_vardy_tpm_algorithm(m, design_snr, mu)
print(z_params)
if 0:
import matplotlib.pyplot as plt
plt.plot(z_params)
plt.show()
if __name__ == '__main__':
main()
|
gpl-3.0
|
rrohan/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
11
|
48140
|
"""
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import ignore_warnings
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
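# Editor's note, a hedged illustration (not part of the original suite): a
# typical use of assert_tree_equal is to confirm that fitting on dense data and
# on a sparse copy yields structurally identical trees, mirroring the
# "clf_small" case exercised by test_sparse_input below. The helper name is
# hypothetical.
def _example_assert_tree_equal_usage():
    dense = DecisionTreeClassifier(random_state=0).fit(X_small, y_small)
    sparse = DecisionTreeClassifier(random_state=0).fit(csc_matrix(X_small),
                                                        y_small)
    assert_tree_equal(dense.tree_, sparse.tree_,
                      "dense vs sparse fit gave different trees")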
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
        # using fewer features reduces the learning ability of this tree,
        # but speeds up training.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
                            err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
    # The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
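# Editor's note, a hedged sketch of why the equivalence above holds: for a
# binary 0/1 target the gini impurity of a node is 2 * p * (1 - p) while the
# node variance used by "mse" is p * (1 - p), so the two criteria differ only
# by a constant factor and rank candidate splits identically. The helper name
# is hypothetical.
def _example_gini_vs_variance(p=0.3):
    gini = 2 * p * (1 - p)     # gini impurity of a node with class balance p
    variance = p * (1 - p)     # variance of a Bernoulli(p) 0/1 target
    assert_almost_equal(gini, 2 * variance)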
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
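# Editor's note, a hedged sketch (not part of the original suite): the
# assertions above imply that fractional max_features values resolve to
# max(1, int(max_features * n_features)); the arithmetic below restates that
# rule for a 4-feature dataset such as iris. The helper name is hypothetical.
def _example_max_features_fraction(n_features=4):
    assert_equal(max(1, int(0.5 * n_features)), 2)
    assert_equal(max(1, int(0.01 * n_features)), 1)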
def test_error():
    # Test that proper exceptions are raised on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
    # Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
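# Editor's note, a hedged sketch (not part of the original suite): the bound
# checked above says every leaf must carry at least
# min_weight_fraction_leaf * sum(sample_weight) of weight. The toy numbers
# below only illustrate that arithmetic. The helper name is hypothetical.
def _example_min_weight_fraction_bound():
    weights = np.array([1.0, 1.0, 2.0, 4.0])
    frac = 0.25
    min_leaf_weight = frac * weights.sum()  # no leaf may weigh less than 2.0
    assert_almost_equal(min_leaf_weight, 2.0)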
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
    # Check that tree estimators are picklable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
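# Editor's note, a hedged sketch (not part of the original suite): the
# class_weight / sample_weight equivalence exercised above amounts to expanding
# the class_weight mapping into per-sample weights by indexing it with the
# labels. The helper name is hypothetical.
def _example_expand_class_weight():
    class_weight = {0: 1., 1: 100., 2: 1.}
    expanded = np.array([class_weight[label] for label in iris.target])
    manual = np.ones(iris.target.shape)
    manual[iris.target == 1] *= 100
    assert_array_almost_equal(expanded, manual)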
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    # Test that the error raised for too-large inputs mentions the float32 limit.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
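# Editor's note, a hedged sketch (not part of the original suite):
# platform.architecture()[0] is a string such as '64bit', and rstrip('bit')
# strips the trailing characters 'b', 'i', 't', leaving the pointer width used
# above to size the unsatisfiable allocation. The helper name is hypothetical.
def _example_pointer_width():
    assert_equal(int('64bit'.rstrip('bit')), 64)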
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Subsample to save testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # n_samples is set equal to n_features to ease the simultaneous
    # construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
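# Editor's note, a hedged sketch (not part of the original suite): an
# "explicit zero" is a 0.0 that is stored in the sparse data array rather than
# dropped from the structure; the check above makes sure the tree code treats
# such entries like ordinary zeros. The helper name is hypothetical.
def _example_explicit_zero():
    mat = csr_matrix((np.array([0., 3.]),   # stored values, one explicit zero
                      np.array([0, 2]),     # column indices
                      np.array([0, 2])),    # indptr for the single row
                     shape=(1, 3))
    assert_equal(mat.nnz, 2)                # the explicit zero is stored
    assert_equal((mat.data == 0.).sum(), 1)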
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
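# Editor's note, a hedged sketch (not part of the original suite): in the
# fixture above each of the five samples carries weight 0.2, so isolating the
# single positive sample would create a leaf holding only 0.2 of the total
# weight; with min_weight_fraction_leaf=0.4 that split is forbidden and the
# tree stays a single root node. The helper name is hypothetical.
def _example_min_weight_leaf_arithmetic():
    sample_weight = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
    positive_leaf_fraction = sample_weight[-1] / sample_weight.sum()
    assert_almost_equal(positive_leaf_fraction, 0.2)
    assert_true(positive_leaf_fraction < 0.4)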
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
    assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
|
bsd-3-clause
|