repo_name (string, 6–112 chars) | path (string, 4–204 chars) | copies (string, 1–3 chars) | size (string, 4–6 chars) | content (string, 714–810k chars) | license (string, 15 classes)
---|---|---|---|---|---
IntelPython/ibench
|
ibench/benchmarks/svm.py
|
1
|
1545
|
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import numpy as np
import sklearn, sklearn.utils
import sklearn.svm as svm
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score
from .bench import Bench
sklearn._ASSUME_FINITE = True
if sklearn.__version__ == '0.18.2':
sklearn.utils.validation._assert_all_finite = lambda X: None
features = [10, 50, 100, 200, 400, 800, 1000, 2000]
vectors = [1000, 2000, 4000, 10000, 20000]
class Svm(Bench):
"""
Benchmark for SVM classification (sklearn.svm.SVC fit) from Scikit-learn
Attempts to utilize parallelism for larger datasets
"""
sizes = {'large': 5, 'small': 3, 'tiny': 2, 'test': 1}
def _gen_datasets(self, features, vectors, classes, dest='data'):
"""Generate classification datasets in binary .npy files
features: a list of feature lengths to test
vectors: a list of sample lengths to test
classes: number of classes (2 for binary classification dataset)
"""
self._X, self._y = make_classification(n_samples=vectors, n_features=features, n_informative=features, n_redundant=0, n_classes=classes, random_state=0)
return self._X, self._y
def _ops(self, n):
return 2E-9 * n
def _make_args(self, n):
self._X, self._y = self._gen_datasets(features[n-1],vectors[n-1],2)
self._clf = svm.SVC(C=0.01, kernel='linear', max_iter=10000, tol=1e-16, shrinking=True)
def _compute(self):
self._clf.fit(self._X, self._y)
|
mit
|
sonnyhu/scipy
|
scipy/special/c_misc/struve_convergence.py
|
76
|
3725
|
"""
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
try:
import mpmath
except ImportError:
from sympy import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
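# Quick illustrative check of err_metric (the numbers below are example values
# chosen here, not values from this script): with b as the mpmath reference,
#   err_metric(np.array([1.0, 2.0]), np.array([1.0, 2.2]))
# returns roughly array([0.0, 0.0909]), i.e. the relative error against b.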
def do_plot(is_h=True):
from scipy.special._ufuncs import \
_struve_power_series, _struve_asymp_large_z, _struve_bessel_series
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
import os
import sys
if '--main' in sys.argv:
main()
else:
import subprocess
subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
'-g', '--python', __file__, '--main'])
|
bsd-3-clause
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/doc/mpl_toolkits/axes_grid/figures/axis_direction_demo_step03.py
|
6
|
1165
|
import matplotlib.pyplot as plt
import mpl_toolkits.axisartist as axisartist
def setup_axes(fig, rect):
ax = axisartist.Subplot(fig, rect)
fig.add_axes(ax)
ax.set_ylim(-0.1, 1.5)
ax.set_yticks([0, 1])
#ax.axis[:].toggle(all=False)
#ax.axis[:].line.set_visible(False)
ax.axis[:].set_visible(False)
ax.axis["x"] = ax.new_floating_axis(1, 0.5)
ax.axis["x"].set_axisline_style("->", size=1.5)
return ax
fig = plt.figure(figsize=(6,2.5))
fig.subplots_adjust(bottom=0.2, top=0.8)
ax1 = setup_axes(fig, "121")
ax1.axis["x"].label.set_text("Label")
ax1.axis["x"].toggle(ticklabels=False)
ax1.axis["x"].set_axislabel_direction("+")
ax1.annotate("label direction=$+$", (0.5, 0), xycoords="axes fraction",
xytext=(0, -10), textcoords="offset points",
va="top", ha="center")
ax2 = setup_axes(fig, "122")
ax2.axis["x"].label.set_text("Label")
ax2.axis["x"].toggle(ticklabels=False)
ax2.axis["x"].set_axislabel_direction("-")
ax2.annotate("label direction=$-$", (0.5, 0), xycoords="axes fraction",
xytext=(0, -10), textcoords="offset points",
va="top", ha="center")
plt.show()
|
mit
|
sniemi/SamPy
|
herschel/plotFluxDistribution.py
|
1
|
5322
|
import matplotlib
matplotlib.use('PS')
matplotlib.rc('text', usetex = True)
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize = 13)
matplotlib.rc('ytick', labelsize = 13)
matplotlib.rc('axes', linewidth = 1.2)
matplotlib.rcParams['legend.fontsize'] = 12
matplotlib.rcParams['legend.handlelength'] = 5
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
import numpy as N
import pylab as P
import os
import matplotlib.ticker as ticker
#Sami's repo
import db.sqlite
import astronomy.conversions as cv
def plot_flux_dist(table, zmin, zmax, depths, colname,
path, database, out_folder,
solid_angle = 10*160.,
fluxbins = 22,
ymin = 1e-7, ymax = 1e-1,
bins = 8, H0 = 70.0, WM=0.28):
query = '''select %s from %s where %s.z >= %.4f and %s.z < %.4f
and FIR.spire250_obs < 1e4''' % \
(colname, table, table, zmin, table, zmax)
#get fluxes in mJy
fluxes = db.sqlite.get_data_sqlite(path, database, query)*1e3
#mass bins
fd = (zmax - zmin) / float(bins)
#number of rows for subplots
rows = int(N.sqrt(bins))
#min and max x values
xmin = 0.0
xmax = 50
fbins = N.linspace(xmin, xmax, fluxbins)
df = fbins[1] - fbins[0]
#calculate volume
comovingVol = cv.comovingVolume(solid_angle, 0, zmax,
H0 = H0, WM = WM)
#weight each galaxy
wghts = (N.zeros(len(fluxes)) + (1./comovingVol)) / df
#make the figure
fig = P.figure()
P.subplots_adjust(wspace = 0.0, hspace = 0.0)
ax = fig.add_subplot(int(bins/rows)-1, rows + 1, 1)
ax.hist(fluxes, bins = fbins,
log = True,
weights = wghts)
if colname in depths:
P.axvline(depths[colname], color = 'g',
ls = '--')
P.text(0.5, 0.9,
r'$ %.1f \leq z < %.1f $' % (zmin, zmax),
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes,
fontsize = 12)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xticklabels([])
#ax.set_yticks(ax.get_yticks()[:-1:2])
#redshift limited plots
zm = zmin
for i in range(bins):
zmax = zm + fd
query = '''select %s from %s where %s.z >= %.4f and %s.z < %.4f
and FIR.spire250_obs < 1e4''' % \
(colname, table, table, zm, table, zmax)
fluxes = db.sqlite.get_data_sqlite(path, database, query)*1e3
#calculate volume
comovingVol = cv.comovingVolume(solid_angle, zm, zmax,
H0 = H0, WM = WM)
#weight each galaxy
wghts = (N.zeros(len(fluxes)) + (1./comovingVol)) / df
#add a subplot
ax = fig.add_subplot(int(bins/rows)-1, rows+1, i+2)
ax.hist(fluxes, bins = fbins,
log = True,
weights = wghts)
if colname in depths:
P.axvline(depths[colname], color = 'g',
ls = '--')
P.text(0.5, 0.9,
r'$ %.1f \leq z < %.1f $' % (zm, zmax),
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes,
fontsize = 12)
if i == 2 or i == 5:
ax.set_yticks(ax.get_yticks()[:-1])
#ax.yaxis.set_major_locator(ticker.MaxNLocator(prune = 'upper'))
else:
ax.set_yticklabels([])
if i == 5 or i == 6 or i == 7:
ax.set_xticks(ax.get_xticks()[:-1])
else:
ax.set_xticklabels([])
if i == 6:
ax.set_xlabel(r'$S_{250} \quad [\mathrm{mJy}]$')
if i == 2:
ax.set_ylabel(r'$\frac{\mathrm{d}N(S_{250})}{\mathrm{d}S_{250}} \quad [\mathrm{Mpc}^{-3} \ \mathrm{dex}^{-1}]$')
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
zm = zmax
#save figure
P.savefig(out_folder + 'FluxDist250.ps')
P.close()
if __name__ == '__main__':
#constants
#find the home directory, because the output is to dropbox
#and my user name is not always the same, this hack is required.
hm = os.getenv('HOME')
#constants
#path = hm + '/Dropbox/Research/Herschel/runs/reds_zero_dust_evolve/'
path = hm + '/Research/Herschel/runs/big_volume/'
database = 'sams.db'
out_folder = hm + '/Dropbox/Research/Herschel/plots/flux_dist/big/'
#5sigma limits derived by Kuang
depths = {'pacs100_obs': 1.7,
'pacs160_obs': 4.5,
'spire250_obs': 5.0,
'spire350_obs': 9.0,
'spire500_obs': 10.0
}
#passbands to be plotted
# bands = ['pacs70_obs',
# 'pacs100_obs',
# 'pacs160_obs',
# 'spire250_obs',
# 'spire350_obs',
# 'spire500_obs']
bands = ['spire250_obs']
#plot the flux distributions
for band in bands:
plot_flux_dist('FIR', 0.0, 4.0, depths,
band, path, database,
out_folder, solid_angle = 100*160.,
ymin = 1e-8, ymax = 7e-2)
|
bsd-2-clause
|
saketkc/statsmodels
|
statsmodels/sandbox/tsa/try_arma_more.py
|
34
|
3744
|
# -*- coding: utf-8 -*-
"""Periodograms for ARMA and time series
theoretical periodogram of an ARMA process and different versions
of periodogram estimation
uses scikits.talkbox and matplotlib
Created on Wed Oct 14 23:02:19 2009
Author: josef-pktd
"""
from __future__ import print_function
import numpy as np
from scipy import signal, ndimage
import matplotlib.mlab as mlb
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_process import arma_generate_sample, arma_periodogram
from statsmodels.tsa.stattools import acovf
try:
    import scikits.talkbox as stb
    import scikits.talkbox.spectral.basic as stbs
    hastalkbox = True
except ImportError:
    hastalkbox = False
ar = [1., -0.7]#[1,0,0,0,0,0,0,-0.7]
ma = [1., 0.3]
ar = np.convolve([1.]+[0]*50 +[-0.6], ar)
ar = np.convolve([1., -0.5]+[0]*49 +[-0.3], ar)
n_startup = 1000
nobs = 1000
# throwing away samples at beginning makes sample more "stationary"
xo = arma_generate_sample(ar,ma,n_startup+nobs)
x = xo[n_startup:]
#moved to tsa.arima_process
#def arma_periodogram(ar, ma, **kwds):
# '''periodogram for ARMA process given by lag-polynomials ar and ma
#
# Parameters
# ----------
# ar : array_like
# autoregressive lag-polynomial with leading 1 and lhs sign
# ma : array_like
# moving average lag-polynomial with leading 1
# kwds : options
# options for scipy.signal.freqz
# default: worN=None, whole=0
#
# Returns
# -------
# w : array
# frequencies
# sd : array
# periodogram, spectral density
#
# Notes
# -----
# Normalization ?
#
# '''
# w, h = signal.freqz(ma, ar, **kwds)
# sd = np.abs(h)**2/np.sqrt(2*np.pi)
# if np.sum(np.isnan(h)) > 0:
# # this happens with unit root or seasonal unit root'
# print 'Warning: nan in frequency response h'
# return w, sd
plt.figure()
plt.plot(x)
rescale = 0
w, h = signal.freqz(ma, ar)
sd = np.abs(h)**2/np.sqrt(2*np.pi)
if np.sum(np.isnan(h)) > 0:
# this happens with unit root or seasonal unit root
print('Warning: nan in frequency response h')
h[np.isnan(h)] = 1.
rescale = 0
#replace with signal.order_filter ?
pm = ndimage.filters.maximum_filter(sd, footprint=np.ones(5))
maxind = np.nonzero(pm == sd)
print('local maxima frequencies')
wmax = w[maxind]
sdmax = sd[maxind]
plt.figure()
plt.subplot(2,3,1)
if rescale:
plt.plot(w, sd/sd[0], '-', wmax, sdmax/sd[0], 'o')
# plt.plot(w, sd/sd[0], '-')
# plt.hold()
# plt.plot(wmax, sdmax/sd[0], 'o')
else:
plt.plot(w, sd, '-', wmax, sdmax, 'o')
# plt.hold()
# plt.plot(wmax, sdmax, 'o')
plt.title('DGP')
sdm, wm = mlb.psd(x)
sdm = sdm.ravel()
pm = ndimage.filters.maximum_filter(sdm, footprint=np.ones(5))
maxind = np.nonzero(pm == sdm)
plt.subplot(2,3,2)
if rescale:
plt.plot(wm,sdm/sdm[0], '-', wm[maxind], sdm[maxind]/sdm[0], 'o')
else:
plt.plot(wm, sdm, '-', wm[maxind], sdm[maxind], 'o')
plt.title('matplotlib')
if hastalkbox:
sdp, wp = stbs.periodogram(x)
plt.subplot(2,3,3)
if rescale:
plt.plot(wp,sdp/sdp[0])
else:
plt.plot(wp, sdp)
plt.title('stbs.periodogram')
xacov = acovf(x, unbiased=False)
plt.subplot(2,3,4)
plt.plot(xacov)
plt.title('autocovariance')
nr = len(x)#*2/3
#xacovfft = np.fft.fft(xacov[:nr], 2*nr-1)
xacovfft = np.fft.fft(np.correlate(x,x,'full'))
#abs(xacovfft)**2 or equivalently
xacovfft = xacovfft * xacovfft.conj()
plt.subplot(2,3,5)
if rescale:
plt.plot(xacovfft[:nr]/xacovfft[0])
else:
plt.plot(xacovfft[:nr])
plt.title('fft')
if hastalkbox:
sdpa, wpa = stbs.arspec(x, 50)
plt.subplot(2,3,6)
if rescale:
plt.plot(wpa,sdpa/sdpa[0])
else:
plt.plot(wpa, sdpa)
plt.title('stbs.arspec')
#plt.show()
|
bsd-3-clause
|
JackKelly/neuralnilm_prototype
|
scripts/e240.py
|
2
|
6994
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 1000
GRADIENT_STEPS = 100
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
235
no lag
236
should be exactly as 131c: no pool, no lag, no init for final and conv layer
237
putting the pool back
238
seems pooling hurts us! disable pooling.
enable lag = 32
239
BLSTM
lag = 20
240
LSTM not BLSTM
various lags
ideas for next TODO:
* 3 LSTM layers with smaller conv between them
* why does pooling hurt us?
"""
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=10,
subsample_target=5,
include_diff=False,
clip_appliance_power=True,
lag=0
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': LSTMLayer,
'num_units': 40,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
# 'W': Uniform(1)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
# {
# 'type': FeaturePoolLayer,
# 'ds': 5, # number of feature maps to be pooled together
# 'axis': 1 # pool over the time axis
# },
{
'type': LSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
}
]
)
def exp_a(name):
# like 239 but LSTM not BLSTM and no lag and clip appliance power
# RESULTS: awful
source = RealApplianceSource(**source_dict)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def exp_b(name):
# as A but lag = 10
source_dict_copy = deepcopy(source_dict)
source_dict_copy['lag'] = 10
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def exp_c(name):
# as A but lag = 20
source_dict_copy = deepcopy(source_dict)
source_dict_copy['lag'] = 20
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def exp_d(name):
# as A but lag = 40
# possibly the best of this e240 lot
source_dict_copy = deepcopy(source_dict)
source_dict_copy['lag'] = 40
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def exp_e(name):
# as A but lag = 80
source_dict_copy = deepcopy(source_dict)
source_dict_copy['lag'] = 80
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('abcde'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=10000)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
if __name__ == "__main__":
main()
|
mit
|
kirichoi/tellurium
|
examples/notebooks-py/tellurium_stochastic.py
|
2
|
2082
|
# coding: utf-8
# Back to the main [Index](../index.ipynb)
# #### Stochastic simulation
#
# Stochastic simulations can be run by changing the current integrator type to 'gillespie' or by using the `r.gillespie` function.
# In[1]:
#!!! DO NOT CHANGE !!! THIS FILE WAS CREATED AUTOMATICALLY FROM NOTEBOOKS !!! CHANGES WILL BE OVERWRITTEN !!! CHANGE CORRESPONDING NOTEBOOK FILE !!!
from __future__ import print_function
import tellurium as te
te.setDefaultPlottingEngine('matplotlib')
get_ipython().magic(u'matplotlib inline')
import numpy as np
r = te.loada('S1 -> S2; k1*S1; k1 = 0.1; S1 = 40')
r.integrator = 'gillespie'
r.integrator.seed = 1234
results = []
for k in range(1, 50):
r.reset()
s = r.simulate(0, 40)
results.append(s)
r.plot(s, show=False, alpha=0.7)
te.show()
# #### Seed
#
# Setting the identical seed for all repeats results in identical traces in each simulation.
# In[2]:
results = []
for k in range(1, 20):
r.reset()
r.setSeed(123456)
s = r.simulate(0, 40)
results.append(s)
r.plot(s, show=False, loc=None, color='black', alpha=0.7)
te.show()
# #### Combining Simulations
#
# You can combine two timecourse simulations and change e.g. parameter values in between each simulation. The `gillespie` method simulates up to the given end time `10`, after which you can make arbitrary changes to the model, then simulate again.
#
# When using the `te.plot` function, you can pass the parameter `names`, which controls the names that will be used in the figure legend, and `tags`, which ensures that traces with the same tag will be drawn with the same color.
# In[3]:
import tellurium as te
import numpy as np
r = te.loada('S1 -> S2; k1*S1; k1 = 0.02; S1 = 100')
r.setSeed(1234)
for k in range(1, 20):
r.resetToOrigin()
res1 = r.gillespie(0, 10)
# change in parameter after the first half of the simulation
r.k1 = r.k1*20
res2 = r.gillespie (10, 20)
sim = np.vstack([res1, res2])
te.plot(sim[:,0], sim[:,1:], alpha=0.7, names=['S1', 'S2'], tags=['S1', 'S2'], show=False)
te.show()
|
apache-2.0
|
ua-snap/downscale
|
old/bin/tas_cld_cru_ts31_to_cl20_downscaling.py
|
3
|
23577
|
# # #
# Current implementation of the cru ts31 (ts32) delta downscaling procedure
#
# Author: Michael Lindgren ([email protected])
# # #
import numpy as np
def write_gtiff( output_arr, template_meta, output_filename, compress=True ):
'''
DESCRIPTION:
------------
output a GeoTiff given a numpy ndarray, rasterio-style
metadata dictionary, and an output_filename.
If a multiband file is to be processed, the Longitude
dimension is expected to be the right-most.
--> dimensions should be (band, latitude, longitude)
ARGUMENTS:
----------
output_arr = [numpy.ndarray] with longitude as the right-most dimension
template_meta = [dict] rasterio-style raster meta dictionary. Typically
found in a template raster by: rasterio.open( fn ).meta
output_filename = [str] path to and name of the output GeoTiff to be
created. currently only 'GTiff' is supported.
compress = [bool] if True (default) LZW-compression is applied to the
output GeoTiff. If False, no compression is applied.
* this can also be added (along with many other gdal creation options)
to the template meta as a key value pair template_meta.update( compress='lzw' ).
See Rasterio documentation for more details. This is just a common one that is used here.
RETURNS:
--------
string path to the new output_filename created
'''
import os, warnings
if 'transform' in template_meta.keys():
_ = template_meta.pop( 'transform' )
if not output_filename.endswith( '.tif' ):
warnings.warn( 'output_filename does not end with ".tif", it has been fixed for you.' )
output_filename = os.path.splitext( output_filename )[0] + '.tif'
if output_arr.ndim == 2:
# add in a new dimension - can get you into trouble with very large rasters...
output_arr = output_arr[ np.newaxis, ... ]
elif output_arr.ndim < 2:
raise ValueError( 'output_arr must have at least 2 dimensions' )
nbands, nrows, ncols = output_arr.shape
if template_meta[ 'count' ] != nbands:
raise ValueError( 'template_meta[ "count" ] must match output_arr bands' )
if compress == True and 'compress' not in template_meta.keys():
template_meta.update( compress='lzw' )
with rasterio.open( output_filename, 'w', **template_meta ) as out:
for band in range( 1, nbands+1 ):
out.write( output_arr[ band-1, ... ], band )
return output_filename
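# Usage sketch for write_gtiff (illustrative only: 'template.tif', 'output.tif'
# and the zero-filled array are placeholders, not files from this project):
#   import rasterio
#   import numpy as np
#   with rasterio.open( 'template.tif' ) as tmpl:
#       meta = tmpl.meta
#       arr = np.zeros( (meta['count'], meta['height'], meta['width']), dtype=meta['dtype'] )
#   write_gtiff( arr, meta, 'output.tif', compress=True )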
def shiftgrid( lon0, datain, lonsin, start=True, cyclic=360.0 ):
import numpy as np
"""
Shift global lat/lon grid east or west.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
lon0 starting longitude for shifted grid
(ending longitude if start=False). lon0 must be on
input grid (within the range of lonsin).
datain original data with longitude the right-most
dimension.
lonsin original longitudes.
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
start if True, lon0 represents the starting longitude
of the new grid. if False, lon0 is the ending
longitude. Default True.
cyclic width of periodic domain (default 360)
============== ====================================================
returns ``dataout,lonsout`` (data and longitudes on shifted grid).
"""
if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
# Use all data instead of raise ValueError, 'cyclic point not included'
start_idx = 0
else:
# If cyclic, remove the duplicate point
start_idx = 1
if lon0 < lonsin[0] or lon0 > lonsin[-1]:
raise ValueError('lon0 outside of range of lonsin')
i0 = np.argmin(np.fabs(lonsin-lon0))
i0_shift = len(lonsin)-i0
if np.ma.isMA(datain):
dataout = np.ma.zeros(datain.shape,datain.dtype)
else:
dataout = np.zeros(datain.shape,datain.dtype)
if np.ma.isMA(lonsin):
lonsout = np.ma.zeros(lonsin.shape,lonsin.dtype)
else:
lonsout = np.zeros(lonsin.shape,lonsin.dtype)
if start:
lonsout[0:i0_shift] = lonsin[i0:]
else:
lonsout[0:i0_shift] = lonsin[i0:]-cyclic
dataout[...,0:i0_shift] = datain[...,i0:]
if start:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
else:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
return dataout,lonsout
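# Small worked example of shiftgrid (arrays invented for illustration):
#   lons = np.arange( 0., 360., 30. )            # 0 ... 330, pacific-centered 0-360 grid
#   data = np.random.rand( 5, lons.shape[0] )    # longitude is the right-most dimension
#   dat_gw, lons_gw = shiftgrid( 180., data, lons, start=False )
#   # lons_gw now runs -180 ... 150, i.e. the grid is re-centered on Greenwich.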
def bounds_to_extent( bounds ):
'''
take input rasterio bounds object and return an extent
'''
l,b,r,t = bounds
return [ (l,b), (r,b), (r,t), (l,t), (l,b) ]
def padded_bounds( rst, npixels, crs ):
'''
convert the extents of 2 overlapping rasters to a shapefile with
an expansion of the intersection of the rasters extents by npixels
rst1: rasterio raster object
rst2: rasterio raster object
npixels: tuple of 4 (left(-),bottom(-),right(+),top(+)) number of pixels to
expand in each direction. for 5 pixels in each direction it would look like
this: (-5. -5. 5, 5) or just in the right and top directions like this:
(0,0,5,5).
crs: epsg code or proj4string defining the geospatial reference
system
output_shapefile: string full path to the newly created output shapefile
'''
import rasterio, os, sys
from shapely.geometry import Polygon
resolution = rst.res[0]
new_bounds = [ bound+(expand*resolution) for bound, expand in zip( rst.bounds, npixels ) ]
return new_bounds
def xyz_to_grid( x, y, z, grid, method='cubic', output_dtype=np.float32 ):
'''
interpolate points to a grid. simple wrapper around
scipy.interpolate.griddata. Points and grid must be
in the same coordinate system
x = 1-D np.array of x coordinates / x,y,z must be same length
y = 1-D np.array of y coordinates / x,y,z must be same length
z = 1-D np.array of z coordinates / x,y,z must be same length
grid = tuple of meshgrid as made using numpy.meshgrid()
order (xi, yi)
method = one of 'nearest', 'linear', 'cubic' (passed to scipy.interpolate.griddata)
'''
import numpy as np
from scipy.interpolate import griddata
zi = griddata( (x, y), z, grid, method=method )
zi = np.flipud( zi.astype( output_dtype ) )
return zi
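# Illustrative call of xyz_to_grid with synthetic points (not project data):
#   xi, yi = np.meshgrid( np.linspace(0, 10, 50), np.linspace(0, 10, 50) )
#   x = np.random.uniform( 0, 10, 200 )
#   y = np.random.uniform( 0, 10, 200 )
#   z = np.sin( x ) * np.cos( y )
#   zi = xyz_to_grid( x, y, z, (xi, yi), method='cubic' )   # 50x50 array, flipped top-to-bottom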
def generate_anomalies( df, meshgrid_tuple, lons_pcll, template_raster_fn, src_transform, src_crs, src_nodata, output_filename, *args, **kwargs ):
'''
run the interpolation to a grid, and reprojection / resampling to the Alaska / Canada rasters
extent, resolution, origin (template_raster).
This function is intended to be used to run a pathos.multiprocessing Pool's map function
across a list of pre-computed arguments.
RETURNS:
[str] path to the output filename generated
'''
template_raster = rasterio.open( template_raster_fn )
template_meta = template_raster.meta
if 'transform' in template_meta.keys():
template_meta.pop( 'transform' )
# update some meta configs
template_meta.update( crs={'init':'epsg:3338'} )
template_meta.update( compress='lzw' )
interp_arr = xyz_to_grid( np.array(df['lon'].tolist()), \
np.array(df['lat'].tolist()), \
np.array(df['anom'].tolist()), grid=meshgrid_tuple, method='cubic' )
src_nodata = -9999.0 # nodata
interp_arr[ np.isnan( interp_arr ) ] = src_nodata
dat, lons = shiftgrid( 180., interp_arr, lons_pcll, start=False )
output_arr = np.empty_like( template_raster.read( 1 ) )
reproject( dat, output_arr, src_transform=src_transform, src_crs=src_crs, src_nodata=src_nodata, \
dst_transform=template_meta['affine'], dst_crs=template_meta['crs'],\
dst_nodata=None, resampling=RESAMPLING.cubic_spline, num_threads=1, SOURCE_EXTRA=1000 )
# mask it with the internal mask in the template raster, where 0 is oob.
output_arr = np.ma.masked_where( template_raster.read_masks( 1 ) == 0, output_arr )
output_arr.fill_value = template_meta[ 'nodata' ]
output_arr = output_arr.filled()
return write_gtiff( output_arr, template_meta, output_filename, compress=True )
def fn_month_grouper( x ):
'''
take a filename and return the month element of the naming convention
'''
return os.path.splitext(os.path.basename(x))[0].split( '_' )[5]
def downscale_cru_historical( file_list, cru_cl20_arr, output_path, downscaling_operation ):
'''
take a list of cru_historical anomalies filenames, groupby month,
then downscale with the cru_cl20 climatology as a numpy 2d ndarray
that is also on the same grid as the anomalies files.
(intended to be the akcan 1km/2km extent).
operation can be one of 'mult', 'add', 'div' and represents the
downscaling operation to be used to scale the anomalies on top of the baseline.
this is based on how the anomalies were initially calculated.
RETURNS:
output path location of the new downscaled files.
'''
from functools import partial
def f( anomaly_fn, baseline_arr, output_path, downscaling_operation ):
def add( cru, anom ):
return cru + anom
def mult( cru, anom ):
return cru * anom
def div( cru, anom ):
# return cru / anom
# this one may not be useful, but the placeholder is here
return NotImplementedError
cru_ts31 = rasterio.open( anomaly_fn )
meta = cru_ts31.meta
meta.update( compress='lzw', crs={'init':'epsg:3338'} )
cru_ts31 = cru_ts31.read( 1 )
operation_switch = { 'add':add, 'mult':mult, 'div':div }
# this is hardwired stuff for this fairly hardwired script.
output_filename = os.path.basename( anomaly_fn ).replace( 'anom', 'downscaled' )
output_filename = os.path.join( output_path, output_filename )
# both files need to be masked here since we use a RIDICULOUS oob value...
# for both tas and cld, values less than -200 are out of the range of acceptable values and it
# grabs the -3.4... mask values. so lets mask using this
baseline_arr = np.ma.masked_where( baseline_arr < -200, baseline_arr )
cru_ts31 = np.ma.masked_where( cru_ts31 < -200, cru_ts31 )
output_arr = operation_switch[ downscaling_operation ]( baseline_arr, cru_ts31 )
output_arr[ np.isinf(output_arr) ] = meta[ 'nodata' ]
if 'transform' in meta.keys():
meta.pop( 'transform' )
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write( output_arr, 1 )
return output_filename
partial_f = partial( f, baseline_arr=cru_cl20_arr, output_path=output_path, downscaling_operation=downscaling_operation )
cru_ts31 = file_list.apply( lambda fn: partial_f( anomaly_fn=fn ) )
return output_path
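# Usage sketch for downscale_cru_historical (the paths and the pre-read January
# climatology array below are placeholders, not project data):
#   import glob, rasterio
#   import pandas as pd
#   anom_files = pd.Series( sorted( glob.glob( '/path/to/anom/*_01_*.tif' ) ) )
#   cl20_jan = rasterio.open( '/path/to/cl20/january.tif' ).read( 1 )
#   downscale_cru_historical( anom_files, cl20_jan, '/path/to/downscaled', 'add' )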
if __name__ == '__main__':
import rasterio, xray, os, glob, affine
from rasterio.warp import reproject, RESAMPLING
import geopandas as gpd
import pandas as pd
import numpy as np
from collections import OrderedDict
from shapely.geometry import Point
from pathos import multiprocessing as mp
import argparse
# parse the commandline arguments
parser = argparse.ArgumentParser( description='preprocess cmip5 input netcdf files to a common type and single files' )
parser.add_argument( "-hi", "--cru_ts31", action='store', dest='cru_ts31', type=str, help="path to historical tas/cld CRU TS3.1 input NetCDF file" )
parser.add_argument( "-ci", "--cl20_path", action='store', dest='cl20_path', type=str, help="path to historical CRU TS2.0 Climatology input directory in single-band GTiff Format" )
parser.add_argument( "-tr", "--template_raster_fn", action='store', dest='template_raster_fn', type=str, help="path to ALFRESCO Formatted template raster to match outputs to." )
parser.add_argument( "-base", "--base_path", action='store', dest='base_path', type=str, help="string path to the folder to put the output files into" )
parser.add_argument( "-bt", "--year_begin", action='store', dest='year_begin', type=int, help="string in format YYYY of the beginning year in the series" )
parser.add_argument( "-et", "--year_end", action='store', dest='year_end', type=int, help="string in format YYYY of the ending year in the series" )
parser.add_argument( "-cbt", "--climatology_begin_time", nargs='?', const='196101', action='store', dest='climatology_begin', type=str, help="string in format YYYY of the beginning year of the climatology period" )
parser.add_argument( "-cet", "--climatology_end_time", nargs='?', const='199012', action='store', dest='climatology_end', type=str, help="string in format YYYY of the ending year of the climatology period" )
parser.add_argument( "-nc", "--ncores", nargs='?', const=2, action='store', dest='ncores', type=int, help="integer valueof number of cores to use. default:2" )
parser.add_argument( "-at", "--anomalies_calc_type", nargs='?', const='absolute', action='store', dest='anomalies_calc_type', type=str, help="string of 'proportional' or 'absolute' to inform of anomalies calculation type to perform." )
parser.add_argument( "-m", "--metric", nargs='?', const='metric', action='store', dest='metric', type=str, help="string of whatever the metric type is of the outputs to put in the filename." )
parser.add_argument( "-dso", "--downscaling_operation", action='store', dest='downscaling_operation', type=str, help="string of 'add', 'mult', 'div', which refers to the type or downscaling operation to use." )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="string of the abbreviation used to identify the variable (i.e. cld)." )
# parse args
args = parser.parse_args()
# unpack args
ncores = args.ncores
base_path = args.base_path
cru_ts31 = args.cru_ts31
cl20_path = args.cl20_path
template_raster_fn = args.template_raster_fn
anomalies_calc_type = args.anomalies_calc_type
downscaling_operation = args.downscaling_operation
climatology_begin = args.climatology_begin
climatology_end = args.climatology_end
year_begin = args.year_begin
year_end = args.year_end
variable = args.variable
metric = args.metric
# make some output directories if they are not there already to dump
# our output files
anomalies_path = os.path.join( base_path, variable, 'anom' )
if not os.path.exists( anomalies_path ):
os.makedirs( anomalies_path )
downscaled_path = os.path.join( base_path, variable, 'downscaled' )
if not os.path.exists( downscaled_path ):
os.makedirs( downscaled_path )
# open with xray
cru_ts31 = xray.open_dataset( cru_ts31 )
# open template raster
template_raster = rasterio.open( template_raster_fn )
template_meta = template_raster.meta
template_meta.update( crs={'init':'epsg:3338'} )
# make a mask with values of 0=nodata and 1=data
template_raster_mask = template_raster.read_masks( 1 )
template_raster_mask[ template_raster_mask == 255 ] = 1
# calculate the anomalies
# this is a temporary name change for the tmp (tas) data naming difference.
if variable == 'tas':
variable = 'tmp'
clim_ds = cru_ts31.loc[ {'time':slice(climatology_begin,climatology_end)} ]
climatology = clim_ds[ variable ].groupby( 'time.month' ).mean( 'time' )
if anomalies_calc_type == 'relative':
anomalies = cru_ts31[ variable ].groupby( 'time.month' ) / climatology
if anomalies_calc_type == 'absolute':
anomalies = cru_ts31[ variable ].groupby( 'time.month' ) - climatology
# reset the variable if tas
if variable == 'tmp':
variable = 'tas'
# rotate the anomalies to pacific centered latlong -- this is already in the greenwich latlong
dat_pcll, lons_pcll = shiftgrid( 0., anomalies, anomalies.lon.data )
# # generate an expanded extent (from the template_raster) to interpolate across
template_raster = rasterio.open( template_raster_fn )
# output_resolution = (1000.0, 1000.0) # hardwired, but we are building this for IEM which requires 1km
template_meta = template_raster.meta
# # interpolate to a new grid
# get longitudes and latitudes using meshgrid
lo, la = [ i.ravel() for i in np.meshgrid( lons_pcll, anomalies.lat ) ] # mesh the lons/lats
# convert into GeoDataFrame and drop all the NaNs
df_list = [ pd.DataFrame({ 'anom':i.ravel(), 'lat':la, 'lon':lo }).dropna( axis=0, how='any' ) for i in dat_pcll ]
xi, yi = np.meshgrid( lons_pcll, anomalies.lat.data )
# meshgrid_tuple = np.meshgrid( lons_pcll, anomalies.lat.data )
# argument setup
src_transform = affine.Affine( 0.5, 0.0, -180.0, 0.0, -0.5, 90.0 )
src_crs = {'init':'epsg:4326'}
src_nodata = -9999.0
# output_filenames setup
years = np.arange( int(year_begin), int(year_end)+1, 1 ).astype( str ).tolist()
months = [ i if len(i)==2 else '0'+i for i in np.arange( 1, 12+1, 1 ).astype( str ).tolist() ]
month_year = [ (month, year) for year in years for month in months ]
output_filenames = [ os.path.join( anomalies_path, '_'.join([ variable,metric,'cru_ts323_anom',month,year])+'.tif' )
for month, year in month_year ]
# build a list of keyword args to pass to the pool of workers.
args_list = [ {'df':df, 'meshgrid_tuple':(xi, yi), 'lons_pcll':lons_pcll, \
'template_raster_fn':template_raster_fn, 'src_transform':src_transform, \
'src_crs':src_crs, 'src_nodata':src_nodata, 'output_filename':fn } \
for df, fn in zip( df_list, output_filenames ) ]
# interpolate / reproject / resample the anomalies to match template_raster_fn
pool = mp.Pool( processes=ncores )
out = pool.map( lambda args: generate_anomalies( **args ), args_list )
pool.close()
# To Complete the CRU TS3.1 Downscaling we need the following:
# read in the pre-processed CL2.0 Cloud Climatology
l = sorted( glob.glob( os.path.join( cl20_path, '*.tif' ) ) ) # this could catch you.
cl20_dict = { month:rasterio.open( fn ).read( 1 ) for month, fn in zip( months, l ) }
# group the data by months
out = pd.Series( out )
out_months = out.apply( fn_month_grouper )
months_grouped = out.groupby( out_months )
# unpack groups for parallelization and make a list of tuples of arguments to pass to the downscale function
mg = [(i,j) for i,j in months_grouped ]
args_list = [ ( i[1], cl20_dict[i[0]], downscaled_path, downscaling_operation ) for i in mg ]
# downscale / write to disk
pool = mp.Pool( processes=ncores )
out = pool.map( lambda args: downscale_cru_historical( *args ), args_list )
pool.close()
# # # # # HOW TO RUN THE APPLICATION # # # # # # #
# # input args -- argparse it
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# ncores = '10'
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_ts31'
# cru_ts31 = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS31/cru_ts_3_10.1901.2009.cld.dat.nc' # 'cru_ts_3_10.1901.2009.tmp.nc'
# cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# anomalies_calc_type = 'relative' # 'absolute'
# downscaling_operation = 'mult' # 'add'
# climatology_begin = '1961'
# climatology_end = '1990'
# year_begin = '1901'
# year_end = '2009'
# variable = 'cld' # 'tas'
# metric = 'pct' # 'C'
# args_tuples = [ ('hi', cru_ts31), ('ci', cl20_path), ('tr', template_raster_fn),
# ('base', base_path), ('bt', year_begin), ('et', year_end),
# ('cbt', climatology_begin), ('cet', climatology_end),
# ('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
# ('dso', downscaling_operation), ('v', variable) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython2.7 -- tas_cld_cru_ts31_to_cl20_downscaling.py ' + args )
# # # # #TAS# # # # # # #
# # input args -- argparse it
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# ncores = '5'
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_ts31'
# cru_ts31 = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS31/cru_ts_3_10.1901.2009.tmp.nc'
# cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/tas/akcan'
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# anomalies_calc_type = 'absolute'
# downscaling_operation = 'add'
# climatology_begin = '1961'
# climatology_end = '1990'
# year_begin = '1901'
# year_end = '2009'
# variable = 'tas'
# metric = 'C'
# args_tuples = [ ('hi', cru_ts31), ('ci', cl20_path), ('tr', template_raster_fn),
# ('base', base_path), ('bt', year_begin), ('et', year_end),
# ('cbt', climatology_begin), ('cet', climatology_end),
# ('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
# ('dso', downscaling_operation), ('v', variable) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython2.7 -- tas_cld_cru_ts31_to_cl20_downscaling.py ' + args )
# # # CRU TS 3.23 -- update:
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# ncores = '10'
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323'
# cru_ts31 = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323/cru_ts3.23.1901.2014.cld.dat.nc' # 'cru_ts_3_10.1901.2009.tmp.nc'
# cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# anomalies_calc_type = 'relative' # 'absolute'
# downscaling_operation = 'mult' # 'add'
# climatology_begin = '1961'
# climatology_end = '1990'
# year_begin = '1901'
# year_end = '2014'
# variable = 'cld' # 'tas'
# metric = 'pct' # 'C'
# args_tuples = [ ('hi', cru_ts31), ('ci', cl20_path), ('tr', template_raster_fn),
# ('base', base_path), ('bt', year_begin), ('et', year_end),
# ('cbt', climatology_begin), ('cet', climatology_end),
# ('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
# ('dso', downscaling_operation), ('v', variable) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython2.7 -- tas_cld_cru_ts31_to_cl20_downscaling.py ' + args )
# # TAS
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# ncores = '10'
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323'
# cru_ts31 = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323/cru_ts3.23.1901.2014.tmp.dat.nc'
# cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# anomalies_calc_type = 'absolute'
# downscaling_operation = 'add'
# climatology_begin = '1961'
# climatology_end = '1990'
# year_begin = '1901'
# year_end = '2014'
# variable = 'tas'
# metric = 'C'
# args_tuples = [ ('hi', cru_ts31), ('ci', cl20_path), ('tr', template_raster_fn),
# ('base', base_path), ('bt', year_begin), ('et', year_end),
# ('cbt', climatology_begin), ('cet', climatology_end),
# ('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
# ('dso', downscaling_operation), ('v', variable) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython2.7 -- tas_cld_cru_ts31_to_cl20_downscaling.py ' + args )
|
mit
|
sagemathinc/smc
|
src/smc_sagews/smc_sagews/tests/test_sagews_modes.py
|
5
|
10324
|
# test_sagews_modes.py
# tests of sage worksheet modes
from __future__ import absolute_import
import pytest
import conftest
import re
import os
from textwrap import dedent
class TestLatexMode:
def test_latex(self, execblob):
execblob(
"%latex\nhello",
want_html=False,
file_type='png',
ignore_stdout=True)
class TestP3Mode:
def test_p3a(self, exec2):
exec2("p3 = jupyter('python3')")
def test_p3b(self, exec2):
exec2("%p3\nimport sys\nprint(sys.version)", pattern=r"^3\.[56]\.\d+ ")
class TestSingularMode:
def test_singular_version(self, exec2):
exec2('%singular_kernel\nsystem("version");', pattern=r"^41\d\d\b")
def test_singular_factor_polynomial(self, exec2):
code = dedent('''
%singular_kernel
ring R1 = 0,(x,y),dp;
poly f = 9x16 - 18x13y2 - 9x12y3 + 9x10y4 - 18x11y2 + 36x8y4 + 18x7y5 - 18x5y6 + 9x6y4 - 18x3y6 - 9x2y7 + 9y8;
factorize(f);''').strip()
exec2(
code,
'[1]:\n _[1]=9\n _[2]=x6-2x3y2-x2y3+y4\n _[3]=-x5+y2\n[2]:\n 1,1,2\n'
)
import platform
# this comparison doesn't work in sage env
# because the distro version is spoofed
# platform.linux_distribution()[1] == "18.04",
@pytest.mark.skipif(
True,
reason="scala jupyter kernel broken in 18.04")
class TestScalaMode:
def test_scala_list(self, exec2):
print(("linux version {}".format(platform.linux_distribution()[1])))
exec2(
"%scala\nList(1,2,3)",
html_pattern="res0.*List.*Int.*List.*1.*2.*3",
timeout=80)
@pytest.mark.skipif(
True,
reason="scala jupyter kernel broken in 18.04")
class TestScala211Mode:
# example from ScalaTour-1.6, p. 31, Pattern Matching
# http://www.scala-lang.org/docu/files/ScalaTour-1.6.pdf
def test_scala211_pat1(self, exec2):
code = dedent('''
%scala211
object MatchTest1 extends App {
def matchTest(x: Int): String = x match {
case 1 => "one"
case 2 => "two"
case _ => "many"
}
println(matchTest(3))
}
''').strip()
exec2(code, html_pattern="defined.*object.*MatchTest1", timeout=80)
def test_scala211_pat2(self, exec2):
exec2("%scala211\nMatchTest1.main(Array())", pattern="many")
def test_scala_version(self, exec2):
exec2(
"%scala211\nutil.Properties.versionString", html_pattern="2.11.11")
class TestAnaconda5:
def test_anaconda_version(self, exec2):
exec2(
"%anaconda\nimport sys\nprint(sys.version)",
pattern=r"^3\.6\.\d+ ")
def test_anaconda_kernel_name(self, exec2):
exec2("anaconda.jupyter_kernel.kernel_name", "anaconda5")
class TestPython3Mode:
def test_p3_max(self, exec2):
exec2("%python3\nmax([],default=9)", "9", timeout=30)
def test_p3_kernel_name(self, exec2):
exec2("python3.jupyter_kernel.kernel_name", "python3")
def test_p3_version(self, exec2):
exec2(
"%python3\nimport sys\nprint(sys.version)", pattern=r"^3\.6\.\d+ ")
def test_capture_p3_01(self, exec2):
exec2(
"%capture(stdout='output')\n%python3\nimport numpy as np\nnp.arange(9).reshape(3,3).trace()"
)
def test_capture_p3_02(self, exec2):
exec2("print(output)", "12\n")
def test_p3_latex(self, exec2):
code = r"""%python3
from IPython.display import Math
Math(r'F(k) = \int_{-\infty}^{\infty} f(x) e^{2\pi i k} dx')"""
htmp = r"""F\(k\) = \\int_\{-\\infty\}\^\{\\infty\} f\(x\) e\^\{2\\pi i k\} dx"""
exec2(code, html_pattern=htmp)
def test_p3_pandas(self, exec2):
code = dedent('''
%python3
import pandas as pd
from io import StringIO
df_csv = r"""Item,Category,Quantity,Weight
Pack,Pack,1,33.0
Tent,Shelter,1,80.0
Sleeping Pad,Sleep,0,27.0
Sleeping Bag,Sleep,1,20.0
Shoes,Clothing,1,12.0
Hat,Clothing,1,2.5"""
mydata = pd.read_csv(StringIO(df_csv))
mydata.shape''').strip()
exec2(code, "(6, 4)")
def test_p3_autocomplete(self, execintrospect):
execintrospect('myd', ["ata"], 'myd', '%python3')
class TestPython3DefaultMode:
def test_set_python3_mode(self, exec2):
exec2("%default_mode python3")
def test_python3_assignment(self, exec2):
exec2("xx=[2,5,99]\nsum(xx)", "106")
def test_capture_p3d_01(self, exec2):
exec2("%capture(stdout='output')\nmax(xx)")
def test_capture_p3d_02(self, exec2):
exec2("%sage\nprint(output)", "99\n")
class TestShMode:
def test_start_sh(self, exec2):
code = "%sh\ndate +%Y-%m-%d"
patn = r'\d{4}-\d{2}-\d{2}'
exec2(code, pattern=patn)
# examples from sh mode docstring in sage_salvus.py
# note jupyter kernel text output is displayed as html
def test_single_line(self, exec2):
exec2("%sh uptime\n", pattern="\d\.\d")
def test_multiline(self, exec2):
exec2("%sh\nFOO=hello\necho $FOO", pattern="hello")
def test_direct_call(self, exec2):
exec2("sh('date +%Y-%m-%d')", pattern=r'\d{4}-\d{2}-\d{2}')
def test_capture_sh_01(self, exec2):
exec2("%capture(stdout='output')\n%sh uptime")
def test_capture_sh_02(self, exec2):
exec2("output", pattern="up.*user.*load average")
def test_remember_settings_01(self, exec2):
exec2("%sh FOO='testing123'")
def test_remember_settings_02(self, exec2):
exec2("%sh echo $FOO", pattern=r"^testing123\s+")
def test_sh_display(self, execblob, image_file):
execblob("%sh display < " + str(image_file), want_html=False)
def test_sh_autocomplete_01(self, exec2):
exec2("%sh TESTVAR29=xyz")
def test_sh_autocomplete_02(self, execintrospect):
execintrospect('echo $TESTV', ["AR29"], '$TESTV', '%sh')
def test_bad_command(self, exec2):
exec2("%sh xyz", pattern="command not found")
class TestShDefaultMode:
def test_start_sh_dflt(self, exec2):
exec2("%default_mode sh")
def test_multiline_dflt(self, exec2):
exec2("FOO=hello\necho $FOO", pattern="^hello")
def test_date(self, exec2):
exec2("date +%Y-%m-%d", pattern=r'^\d{4}-\d{2}-\d{2}')
def test_capture_sh_01_dflt(self, exec2):
exec2("%capture(stdout='output')\nuptime")
def test_capture_sh_02_dflt(self, exec2):
exec2("%sage\noutput", pattern="up.*user.*load average")
def test_remember_settings_01_dflt(self, exec2):
exec2("FOO='testing123'")
def test_remember_settings_02_dflt(self, exec2):
exec2("echo $FOO", pattern=r"^testing123\s+")
def test_sh_display_dflt(self, execblob, image_file):
execblob("display < " + str(image_file), want_html=False)
def test_sh_autocomplete_01_dflt(self, exec2):
exec2("TESTVAR29=xyz")
def test_sh_autocomplete_02_dflt(self, execintrospect):
execintrospect('echo $TESTV', ["AR29"], '$TESTV')
class TestRMode:
def test_r_assignment(self, exec2):
exec2("%r\nxx <- c(4,7,13)\nmean(xx)", html_pattern="^8$")
def test_r_version(self, exec2):
exec2("%r\nR.version.string", html_pattern=r"\d+\.\d+\.\d+")
def test_capture_r_01(self, exec2):
exec2("%capture(stdout='output')\n%r\nsum(xx)")
def test_capture_r_02(self, exec2):
exec2("print(output)", "24\n")
class TestRDefaultMode:
def test_set_r_mode(self, exec2):
exec2("%default_mode r")
def test_rdflt_assignment(self, exec2):
exec2("xx <- c(4,7,13)\nmean(xx)", html_pattern="^8$")
def test_dflt_capture_r_01(self, exec2):
exec2("%capture(stdout='output')\nsum(xx)")
def test_dflt_capture_r_02(self, exec2):
exec2("%sage\nprint(output)", "24\n")
class TestRWD:
"issue 240"
def test_wd0(self, exec2, data_path):
dp = data_path.strpath
code = "os.chdir('%s')" % dp
exec2(code)
def test_wd(self, exec2, data_path):
dp = data_path.strpath
exec2("%r\ngetwd()", html_pattern=dp)
class TestOctaveMode:
def test_start_octave(self, exec2):
exec2("%octave")
def test_octave_calc(self, exec2):
code = "%octave\nformat short\nbesselh(0,2)"
outp = r"ans = 0.22389\s+\+\s+0.51038i"
exec2(code, pattern=outp)
def test_octave_fibonacci(self, exec2):
code = dedent('''%octave
fib = ones (1, 10);
for i = 3:10
fib(i) = fib(i-1) + fib(i-2);
printf('%d,', fib(i))
endfor
''')
outp = '2,3,5,8,13,21,34,55,'
exec2(code, pattern=outp)
def test_octave_insync(self, exec2):
# this just confirms that input/output is still in sync after the for loop above
exec2('%octave\n1+1', pattern='ans = 2')
class TestOctaveDefaultMode:
def test_octave_capture1(self, exec2):
exec2("%default_mode octave")
def test_octave_capture2(self, exec2):
exec2("%capture(stdout='output')\nx = [1,2]")
def test_octave_capture3(self, exec2):
exec2("%sage\nprint(output)", pattern=" 1 2")
def test_octave_version(self, exec2):
exec2("version()", pattern="4.2.2")
class TestAnaconda2019Mode:
def test_start_a2019(self, exec2):
exec2('a2019 = jupyter("anaconda2019")')
def test_issue_862(self, exec2):
exec2('%a2019\nx=1\nprint("x = %s" % x)\nx', 'x = 1\n')
def test_a2019_error(self, exec2):
exec2('%a2019\nxyz*', html_pattern='span style.*color')
class TestAnaconda5Mode:
def test_start_a5(self, exec2):
exec2('a5 = jupyter("anaconda5")')
def test_issue_862(self, exec2):
exec2('%a5\nx=1\nprint("x = %s" % x)\nx', 'x = 1\n')
def test_a5_error(self, exec2):
exec2('%a5\nxyz*', html_pattern='span style.*color')
class TestJuliaMode:
def test_julia_quadratic(self, exec2):
exec2(
'%julia\nquadratic(a, sqr_term, b) = (-b + sqr_term) / 2a\nquadratic(2.0, -2.0, -12.0)',
'2.5',
timeout=40)
def test_julia_version(self, exec2):
exec2("%julia\nVERSION", pattern=r'^v"1\.2\.\d+"', timeout=40)
|
agpl-3.0
|
dboonz/polymode
|
Polymode/Modes.py
|
5
|
53254
|
# _*_ coding=utf-8 _*_
#
#---------------------------------------------------------------------------------
#Copyright © 2009 Andrew Docherty
#
#This program is part of Polymode.
#Polymode is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------------
"""
This module contains the Mode class and different helper functions
that manipulate the modes.
Functions in this module
------------------------
- branchsqrt
Take the correct branch of the sqrt.
- compress_modes
Compress modes to given shape
- filter_suprious_modes
Select those modes that are converged and not spurious
- filter_unique_modes
Select those modes that are unique
- construct_degenerate_pair
Construct degenerate mode pair from circularly polarized mode
- construct_lp_degenerate_pair
Construct linearly polarized mode pair
- construct_combined_mode
Construct a mode from a linear combination of other modes.
"""
from __future__ import division
import sys, time, logging
import numpy as np
import scipy as sp
from numpy import linalg as la
from numpy import fft as fft
#Import certain special cases
from numpy import pi, newaxis
from .mathlink import utf8out, misc, timer, coordinates, constants, hankel1, hankel1p, jv
from .difflounge import finitedifference
from . import Plotter, Waveguide
def branchsqrt(x):
'''
Take the correct branch of the sqrt.
The branchcut is along the -ve imaginary axis
see: M. Nevière, "Electrodynamic theory of gratings", Chap 5., 1980.
'''
argx = np.remainder(np.angle(x)+pi/2,2*pi)-pi/2
f = np.absolute(x)**0.5*np.exp(0.5j*argx)
return f
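# Example of the branch choice (values for illustration only). Just below the
# negative real axis the principal square root and branchsqrt differ:
#   np.sqrt( -1 - 1e-12j )     # ~ -1j  (branch cut on the negative real axis)
#   branchsqrt( -1 - 1e-12j )  # ~ +1j  (branch cut moved to the negative imaginary axis)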
def _swap_vector(v, axis=-1, shift=0):
v = v.swapaxes(axis,0)
if shift:
#Flip the +/- frequency components in standard fft ordering
fftflip = fft.fftshift(v,axes=[0])[::-1]
#We need to roll the array by 1 if it's even
if np.mod(v.shape[0],2)==1:
v = fft.ifftshift(fftflip,axes=[0])
else:
v = fft.fftshift(np.roll(fftflip,1,axis=0),axes=[0])
else:
#Standard flip
v = v[::-1]
v = v.swapaxes(0,axis)
return v
# +-----------------------------------------------------------------------+
# | Utility Functions
# +-----------------------------------------------------------------------+
def compress_modes(modes, Nshape, wg):
"""
Compress modes to given size
Parameters
----------
modes : list
List of modes to compress
Nshape : (Nr, Nphi)
The size to compress the mode to
wg : Waveguide
The waveguide on which the modes
were solved
"""
for m in modes:
if np.iterable(m):
compress_modes(m, Nshape, wg)
else:
m.compress(Nshape, wg)
m.add_extensions()
return modes
def filter_suprious_modes(modes):
"""
Select those modes that are converged and not spurious
"""
notspurious = np.nonzero([not md.is_spurious for md in modes])[0]
converged = np.nonzero([md.is_converged() for md in modes])[0]
wanted = np.intersect1d(notspurious, converged)
modes = [modes[ii] for ii in wanted]
return modes
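#Hedged sketch (not part of the original module): filter_suprious_modes only
#needs the is_spurious flag and is_converged() on each object, so the simple
#stand-in class below is enough to illustrate the filtering.
class _FakeMode:
    def __init__(self, spurious, converged):
        self.is_spurious = spurious
        self._converged = converged
    def is_converged(self):
        return self._converged

def _filter_spurious_example():
    modes = [_FakeMode(False, True), _FakeMode(True, True), _FakeMode(False, False)]
    return filter_suprious_modes(modes)     #keeps only the first stand-in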
def filter_unique_modes(modes, cutoff=1e-8):
"""
Select those modes that are unique
"""
unique = np.ones(len(modes))
smodes = sorted(modes)
for ii in range(len(smodes)-1):
m1 = smodes[ii]
m2 = smodes[ii+1]
if np.abs(m1.evalue-m2.evalue)<abs(m1.evalue)*cutoff:
ip = np.dot(np.conj(m1.left), m2.right)
if np.abs(ip)>1e-8:
print "Repeated:",m1.neff,m2.neff
unique[ii+1] = 0
modes = [smodes[ii] for ii in np.transpose(np.nonzero(unique))]
logging.info("Filtered %d repeated modes" % (len(smodes)-len(modes)))
return modes
def construct_degenerate_pair(m1):
"""
    Construct degenerate mode pair from circularly polarized mode
"""
if m1.m0==0:
logging.warning("Mode class 0 is non-degenerate.")
#Construct opposite circularly polarized mode
mode_class = type(m1)
m2 = mode_class(coord=m1.coord, m0=-m1.m0, wl=m1.wl, evalue=m1.evalue)
m2.exterior_index = m1.exterior_index
m2.interior_index = m1.interior_index
    #Form new fields by conjugating and swapping the +/- components, h± = conj(h∓)
rs = np.conj(m1.shape_vector(m1.right)[...,::-1])
ls = np.conj(m1.shape_vector(m1.left)[...,::-1])
#Conjugation flips fourier coefficients
m2.right = _swap_vector(rs,1,1).ravel()
m2.left = -_swap_vector(ls,1,1).ravel()
#Remember to add extensions
m2.add_extensions()
return m1,m2
def construct_lp_degenerate_pair(m):
"""
Construct linearly polarized mode pair
"""
m1c,m2c = construct_degenerate_pair(m)
#Construct LP modes as a combined vector
# so m1 =(m1c+m2c)/2
    #    m2 =(m1c-m2c)/2j
m1 = CombinedVectorMode((m1c,m2c), (0.5,0.5), coord=m.coord, m0=0,
wl=m.wl, evalue=m.evalue)
m2 = CombinedVectorMode((m1c,m2c), (-0.5j,0.5j), coord=m.coord, m0=0,
wl=m.wl, evalue=m.evalue)
#remember to set these
m1.exterior_index = m2.exterior_index = m.exterior_index
m1.interior_index = m2.interior_index = m.interior_index
return m1,m2
def construct_combined_mode(modes, coeffs, neff=None, wl=None):
"""
Construct a mode from a linear combination of other modes.
given the modes and the coefficients the mode fields are
combined as:
F = Σᵢ aᵢ Fᵢ
    where F is the combined field, Fᵢ is the field of the i-th mode and aᵢ is
    the i-th coefficient.
Parameters
----------
modes : list of Mode
List of modes to combine
coeffs : list of float
List of the same length as `modes` giving the coefficient
for the corresponding mode a_i
neff : complex
(optional) Effective index of the new mode, otherwise taken
from the first mode of `modes`
wl : float
(optional) Wavelength of new mode, otherwise taken
from the first mode of `modes`
"""
assert len(modes)==len(coeffs), "Number of modes and coefficients must be equal"
assert len(modes)>0, "At least one mode is required"
    #Take a representative mode
m = modes[0]
coord = m.coord
ev = (neff*m.k0)**2 if neff else m.evalue
wl = wl if wl else m.wl
mc = CombinedVectorMode(modes, coeffs, coord=coord, m0=0, wl=wl, evalue=ev)
#remember to set these
mc.exterior_index = m.exterior_index
mc.interior_index = m.interior_index
return mc
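#Hedged sketch (not part of the original module): the combination rule used
#above and by CombinedVectorMode, F = Σᵢ aᵢ Fᵢ, written out on toy field
#arrays instead of Mode objects.
def _combined_field_example():
    fields = [np.array([1.0, 0.0, 1j]), np.array([0.0, 1.0, -1j])]
    coeffs = [0.5, -0.5j]
    return coeffs[0]*fields[0] + coeffs[1]*fields[1]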
# +-----------------------------------------------------------------------+
# |
# | The Mode class
# |
# +-----------------------------------------------------------------------+
class Mode(object):
"""
    Base class for a mode. To construct a mode all parameters are optional
    and the mode information can be added later.
    Parameters
    ----------
coord: internal coordinate object
wl: the wavelength of the mode solution
m0: the mode class
evalue: the eigenvalue, automatically converted to beta and neff
wg: the waveguide used to solve. not stored, just used to extract info
left: the left eigenvector
right: the right eigenvector
"""
def __init__(self, coord=None, wl=1, m0=1, evalue=0, wg=None,
symmetry=1, left=None, right=None):
self.m0 = m0
self.evalue = evalue
self.wl=wl
self.coord = coord
self.symmetry = symmetry
self.left = left
self.right = right
        #Solver parameters
self.store_field = True
self.tolerance = 1e-9
if wg:
self.exterior_index = wg.exterior_index(wl)
self.interior_index = wg.interior_index(wl)
#Analytic extensions
self.aleft_ext = self.aright_ext = None
self.aleft_int = self.aright_int = None
#Information
self.convergence=[]
self.is_spurious = False
self.residue=0
self.label = {}
def copy(self, link_fields=False):
"""
Create new copy of mode.
Parameters
---------
link_vectors : {True, False}
share the mode information between modes to save memory
"""
newmode = self.__class__()
newmode.m0 = self.m0
newmode.evalue = self.evalue
newmode.wl = self.wl
newmode.coord = self.coord
newmode.symmetry = self.symmetry
if hasattr(self,'exterior_index'):
newmode.exterior_index = self.exterior_index
newmode.interior_index = self.interior_index
#Information
newmode.tolerance = self.tolerance
newmode.convergence = self.convergence
newmode.is_spurious = self.is_spurious
newmode.residue = self.residue
newmode.label = self.label
return newmode
def floquet(self, m0=None, coord=None):
m0 = self.m0 if m0 is None else m0
if coord is None:
return np.exp(1j*m0*self.coord.phiv)
else:
return np.exp(1j*m0*coord.phiv)
# -------------------------------------------------------------------
# Numerical properties of the Mode
# -------------------------------------------------------------------
@property
def gamma_ext(self):
"""
        Calculate the exterior mode parameter, ɣ = √[n_ext² k₀² - β²]
"""
n = self.exterior_index
return branchsqrt((n*self.k0)**2-self.evalue)
@property
def gamma_int(self):
"""
        Calculate the interior mode parameter, ɣ = √[n_int² k₀² - β²]
"""
n = self.interior_index
return branchsqrt((n*self.k0)**2-self.evalue)
@property
def k0(self):
"Return the wavenumber"
return 2*pi/self.wl
    #Calculate/set neff from/to internal evalue
# *** Note - setter isn't implemented in python 2.5 ***
#@property
def get_neff(self):
"Return the effective index of the mode"
return sp.sqrt(self.evalue)/self.k0
#@neff.setter
def set_neff(self, value):
"Return the effective index of the mode"
self.evalue = value**2*self.k0**2
neff = property(get_neff, set_neff)
#@property
def get_beta(self):
"Return the modal eigenvalue, β"
return sp.sqrt(self.evalue)
#@beta.setter
def set_beta(self, value):
"Return the modal eigenvalue, β"
self.evalue = value**2
beta = property(get_beta, set_beta)
@property
def loss(self):
"Calculate the confinement loss of the mode as loss = 2×10⁷ Im{β}/ln(10) db/m"
return 2e7*np.imag(self.beta)/np.log(10)
def is_converged(self):
return self.residue<self.tolerance
# -------------------------------------------------------------------
# Mode information
# -------------------------------------------------------------------
def estimate_class(self, wg=None, core_size=None):
"Guess the core mode class as either TM, TE or Hybrid"
cutoff = 1e6
cutoff_like = 100
if core_size is None:
core_size = wg.core_size*0.75
#ec = self.coord.new( rmax=core_size, Nshape=(100,20), border=1 )
ec = coordinates.PolarCoord(rrange=(0,core_size), arange=(-pi,pi), N=(50,50), border=1)
#Look at the mode fields in the core
h = self.magnetic_field(coord=ec)
e = self.electric_field(wg, coord=ec)
        modeclass = 'HE/EH_(%d,j)-like' % np.mod(self.m0, self.symmetry)
#All mode classes <>0 or symmetry/2 are degenerate
if self.m0==0 or self.m0==self.symmetry/2.0:
tm_factor = sp.sqrt(np.abs(h[0])**2+np.abs(h[1])**2).sum() \
/np.median(abs(h[2]))
te_factor = sp.sqrt(np.abs(e[0])**2+np.abs(e[1])**2).sum() \
/np.median(abs(e[2]))
if tm_factor/te_factor > cutoff:
modeclass = 'TM_(0,j)'
elif tm_factor/te_factor > cutoff_like:
modeclass = 'TM_(0,j)-like'
elif te_factor/tm_factor > cutoff:
modeclass = 'TE_(0,j)'
elif te_factor/tm_factor > cutoff_like:
modeclass = 'TE_(0,j)-like'
else:
modeclass += " (degenerate)"
return modeclass
def polarization_angle(self, wg=None, core_size=None):
"""
Approximate polarization angle of the mode (if linearly polarized)
Parameters
----------
wg : Waveguide
(optional) Specify the waveguide for accurate field calculations
core_size : float
            (optional) Specify the radius to limit the calculation to
"""
if core_size is None:
if wg is None:
core_size = 0.75*self.coord.rmax
else:
core_size = wg.core_size*0.75
ec = coordinates.PolarCoord(rrange=(0,core_size), arange=(-pi,pi), N=(50,50), border=1)
#Look at the mode fields in the core
hx,hy,hz = self.magnetic_field(cartesian=1, coord=ec)
xpol = la.norm(hx)
ypol = la.norm(hy)
return np.arctan2(xpol,ypol)
# -------------------------------------------------------------------
# Overloadable methods
# -------------------------------------------------------------------
def discard_fields():
pass
def store_calculated_electric_field(self, wg=None, force=False):
pass
def add_extensions(self, bc_ext=-3, bc_int=2):
pass
def compress(self, size, wg=None):
pass
def normalize(self, wg=None, coord=None):
pass
# -------------------------------------------------------------------
# Field properties
# -------------------------------------------------------------------
def poynting(self, **kwargs):
'''
        The longitudinal component of the time-averaged Poynting vector
S_z = 0.5 ẑ∙(e×h*)
'''
ht = self.magnetic_transverse_field(**kwargs)
et = self.electric_transverse_field(**kwargs)
Sz = 0.5*np.real(np.cross(et,np.conj(ht),axis=0))
return Sz
def _convert_polar_vector(self, v, coord, cartesian=None):
if cartesian is None: cartesian = not coord.polar_coordinate
if cartesian: v = coordinates.vector_polar_to_cartesian(v, coord)
return v
def mode_power(self, r=None, coord=None):
'''
The power in the computational region
        P = 1/2 |Aj|² ∫ (e×h*)∙ẑ dA
'''
if coord is None: coord = self.coord
#Take a new maximum radius
if r is not None and r<self.coord.rmax:
Nshapenew = (np.int(r*coord.Nr/coord.rmax), coord.Naz)
coord = coord.new(rmax=r, Nshape=Nshapenew)
Sint = coord.int_dA(self.poynting(coord=coord))
return Sint
    def mode_unconjugated_integral(self, fourier=True, coord=None, r=None):
'''
I = ∫ (e×h)∙ẑ dA
        Parameters
        ----------
        coord : Coordinate
            (optional) Perform calculations with this coordinate
        fourier : {True, False}
            Calculate integrals in the Fourier domain
        r : float
            (optional) Radius beyond the computational domain at which the
            analytic exterior field contribution is included
'''
if coord is None: coord = self.coord
ht = self.magnetic_transverse_field(fourier=fourier, coord=coord)
et = self.electric_transverse_field(fourier=fourier, coord=coord)
Sint = coord.int_dA(coord.cross_t(et,ht))
        #Add contribution from external fields
        #Does not account for coord.symmetry currently - fix this!
        extend = 0
        Sext = 0
        if r is not None and (extend or r>self.coord.rmax):
Sext = -0.5j*iprod_extend(self.aleft_ext, self.aright_ext)
if r>self.coord.rmax:
Sext -= -0.5j*iprod_extend(self.aleft_ext, self.aright_ext, rc=r)
if fourier: Sint/=coord.dphi
Sint = Sint*coord.symmetry + Sext*self.coord.dphi*self.symmetry
return Sint
def effective_area(self, coord=None):
'''
Nonlinear effective area of mode in um²
Aeff = [∫ |Sz|² dA]² / ∫ |Sz|⁴ dA
See Agrawal pg.
Parameters
---------
coord : Coordinate
(optional) Perform calculations with this coordinate
'''
if coord is None: coord = self.coord
Sz2 = self.poynting(coord=coord)**2
Aeff = coord.int_dA(Sz2)**2 / coord.int_dA(Sz2**2)
return Aeff
def numerical_aperture(self, coord=None):
'''
        Numerical aperture from the Gaussian approximation for a single-mode MOF.
        See Mortensen et al., "Numerical Aperture of a Single-Mode Photonic Crystal
        Fiber", PTL, Vol. 14, No. 8, 2002, pp. 1094-1096.
Parameters
---------
coord : Coordinate
(optional) Perform calculations with this coordinate
'''
NA = 1/sp.sqrt(1+pi*self.effective_area(coord=coord)/self.wl**2)
return NA
def spot_size(self, coord=None):
'''
        Calculate the Petersen II spot size, computed as
        spot size = √[ ∫ Sz² dA / ∫ (∇Sz)² dA ]
Parameters
---------
coord : Coordinate
(optional) Perform calculations with this coordinate
'''
if coord is None: coord=self.coord
Sz = self.poynting(coord=coord)
DSzr, DSzphi = coord.grad_t(Sz, m0=0, fourier=False)
p2 = coord.int_dA(Sz**2)/coord.int_dA(DSzr**2 + DSzphi**2)
return sp.sqrt(p2)
def phase_velocity(self):
'''
        Calculate the phase velocity (m/s) of the mode
'''
        return constants.c*self.k0/self.beta
def group_velocity(self, wg=None, coord=None):
return constants.c/self.group_index(wg,coord)
def group_index(self, wg=None, coord=None):
'''
        Calculate the group index of the mode based on the mode profile
See Snyder and Love pg. 608
Parameters
---------
wg : Waveguide
(optional) Use this waveguide for the refractive index
coord : Coordinate
(optional) Perform calculations with this coordinate
'''
if wg is None:
logging.warning("Calculation being approximated as no waveguide available")
n2 = self.interior_index**2
dn2dl = 0
else:
n2 = wg.index2(wl=self.wl, coord=self.coord, resample=coord)
dn2dl = wg.material.wavelength_derivative(self.wl, units='wl')
h = self.magnetic_field(fourier=False, coord=coord)
e = self.electric_field(wg, fourier=False, coord=coord)
exh = np.cross(e[:2], np.conj(h[:2]), axis=0)
e2abs = e*np.conj(e); h2abs = h*np.conj(h)
if coord is None: coord=self.coord
ng = coord.int_dA(h2abs + (n2-self.wl*dn2dl)*e2abs)/coord.int_dA(exh)/2
return ng
def group_index_solver(self, solver, dwl=1e-5):
'''
        Calculate the group index of the mode using the solver
Parameters
---------
solver
'''
wl1 = self.wl + dwl
wl2 = self.wl - dwl
#Approximate wavelength derivative
solver.equation.setup(solver.base_shape,solver.wg,self.m0, wl1)
solver.equation.set_lambda(self.evalue)
yTx1 = np.dot(np.conj(self.left),solver.equation.matvec(self.right))
solver.equation.setup(solver.base_shape,solver.wg,self.m0, wl2)
solver.equation.set_lambda(self.evalue)
yTx2 = np.dot(np.conj(self.left),solver.equation.matvec(self.right))
#Beta derivative
solver.jacobian.setup(solver.base_shape,solver.wg,self.m0,self.wl)
solver.jacobian.set_lambda(self.evalue)
dTdb = 2*self.beta*np.dot(np.conj(self.left),solver.jacobian.matvec(self.right) - self.right)
dTdwl = 0.5*(yTx1-yTx2)/dwl
#The dispersion
dbdwl = -dTdwl/dTdb
return -dbdwl*self.wl**2/(2*pi)
def integral_propagation(self, wg=None, coord=None):
'''
        Calculate the propagation constant β of the mode based on the
        mode profile; requires the waveguide
Parameters
---------
wg : Waveguide
(optional) Use this waveguide for the refractive index
coord : Coordinate
(optional) Perform calculations with this coordinate
'''
if wg is None:
logging.warning("Calculation being approximated as no waveguide available")
n2 = self.interior_index**2
else:
n2 = wg.index2(wl=self.wl, coord=self.coord, resample=coord)
hr,hphi,hz = self.magnetic_field(fourier=0, coord=coord)
er,ephi,ez = self.electric_field(wg, fourier=0, coord=coord)
exh = np.cross((er,ephi), np.conj((hr,hphi)), axis=0)
e2abs = er*np.conj(er)+ephi*np.conj(ephi)+ez*np.conj(ez)
h2abs = hr*np.conj(hr)+hphi*np.conj(hphi)+hz*np.conj(hz)
if coord is None: coord=self.coord
beta = (2*self.k0)*coord.int_dA(n2*exh)/coord.int_dA(h2abs + np.conj(n2)*e2abs)
return beta
# -------------------------------------------------------------------
# Plotting
# -------------------------------------------------------------------
def plot(self, plottype='Sz', style='pcolor', part='real', cartesian=None, wg=None,
Nx=None, sectors=None , rmin=None, rmax=None, cmap=None, coord=None, style1d='-',
title=r"%(type)s, $n_{\mathrm{eff}}=%(tneff)s$"):
"""Plot the mode.
        Parameters
        ----------
plottype : {'vector', 'Sz', 'Hz', 'Ez, 'Hr, 'Ha', 'Er, 'Ea', 'Ex', 'Ey', 'Hx', 'Hy'}
Specify what to plot, default is 'Sz'
style : {'contour', 'pcolor', 'line', '3d'}
The plot style to use
part : {'real'*, 'imag', 'abs', 'phase', 'log'}
Plot a specific part of the `plottype` data
rmax : float
            Maximum plot radius
Nx : (Nr, Nphi) or (Nx, Ny)
Plot at this resolution
style1d : matplotlib style
Line style for 1d plotting
cmap : color map
Colour map to plot (from pylab.cm)
wg : Waveguide
Used if Hz, Ez is plotted.
title : string
Custom title for the plot
"""
plottype = plottype.lower()
plotstyle={}
color = None
#if not self.has_mode_data():
# raise RuntimeError, "No vectors stored in this mode, cannot plot"
symm = self.symmetry
#If a coord is not specified we need to guess a good one
if coord is None:
#Resample output to a suitable size
if Nx is None:
if plottype.startswith('vect') or plottype in ['ph','pe']:
cartesian = 1
Nx = (20,20)
else:
if cartesian:
Nx = (100,100) #Default cartesian resolution
elif self.coord.shape is None or self.coord.shape[1]==1:
Nx = (200, 1) #Keep radial coordintates
else:
Nx = (50, 100) #Default polar resolution
if rmax is None: rmax = self.coord.rrange[1]
if rmin is None: rmin = 0
dT = pi if sectors is None else sectors*pi/symm
#New plotcoord for resampled plotting
if cartesian:
plotcoord = coordinates.CartesianCoord(X=rmax, Y=rmax, N=Nx)
else:
plotcoord = coordinates.PolarCoord(rrange=(rmin,rmax), arange=(-dT,dT), N=Nx, border=0)
else:
plotcoord = coord
#Choose what to plot
if plottype == 'sz' or plottype == 'power':
plotdata = (self.poynting(coord=plotcoord))
elif plottype == 'szc':
plotdata = (self._calculated_poynting(wg=wg, coord=plotcoord))
elif plottype == 'vector' or plottype == 'vectorh' or style == 'vector':
plotdata = self.magnetic_transverse_field(cartesian=1, coord=plotcoord)
color='black'; plottype = 'H-Vector'; style = 'vector'
elif plottype == 'vectore':
plotdata = self.electric_transverse_field(cartesian=1, coord=plotcoord)
color='blue'; plottype = 'E-Vector'; style = 'vector'
elif plottype == 'ph':
plotdata = self.magnetic_transverse_field(cartesian=1, coord=plotcoord)
color='black'; plottype = 'H-Polarization'; style = 'circ'; part='all'
elif plottype == 'pe':
plotdata = self.electric_transverse_field(cartesian=1, coord=plotcoord)
plotstyle['ec']='blue'; plottype = 'E-Polarization'; style = 'circ'; part='all'
elif plottype == 'hz':
plotdata = self.magnetic_field(coord=plotcoord)[-1]
elif plottype == 'ez':
plotdata = self.electric_field(wg, coord=plotcoord)[-1]
elif plottype == 'hr':
plotdata = self.magnetic_transverse_field(cartesian=0, coord=plotcoord)[0]
elif plottype == 'ha':
plotdata = self.magnetic_transverse_field(cartesian=0, coord=plotcoord)[1]
elif plottype == 'er':
plotdata = self.electric_transverse_field(cartesian=0, coord=plotcoord)[0]
elif plottype == 'ea':
plotdata = self.electric_transverse_field(cartesian=0, coord=plotcoord)[1]
elif plottype == 'erc':
plotdata = self.calculated_electric_field(wg, coord=plotcoord)[0]
elif plottype == 'eac':
plotdata = self.calculated_electric_field(wg, coord=plotcoord)[1]
elif plottype=='hx':
plotdata = self.magnetic_transverse_field(cartesian=1, coord=plotcoord)[0]
elif plottype=='hy':
plotdata = self.magnetic_transverse_field(cartesian=1, coord=plotcoord)[1]
elif plottype == 'ex':
plotdata = self.electric_transverse_field(cartesian=1, coord=plotcoord)[0]
elif plottype == 'ey':
plotdata = self.electric_transverse_field(cartesian=1, coord=plotcoord)[1]
else:
logging.error("Plottype isn't recognised")
return
#Select real, imag or abs parts
parts = part.replace(',',' ').split()
if 'abs' in parts: plotdata = np.abs(plotdata)
elif 'phase' in parts: plotdata = np.arctan2(np.real(plotdata),np.imag(plotdata))
elif 'imag' in parts: plotdata = np.imag(plotdata)
elif 'real' in parts: plotdata = np.real(plotdata)
if 'log' in parts: plotdata = np.log(np.abs(plotdata))
#2D or 1D plot
if 'vector' in style or plotdata.shape[1]>1:
Plotter.plot_v( plotcoord, plotdata, style=style, cmap=cmap, color=color)
else:
Plotter.plot(plotcoord.rv, plotdata.squeeze(), style1d)
Plotter.xlabel('Radial distance, r')
#Format the title
tdata = {'type':plottype,
'rneff':np.real(self.neff),
'ineff':np.imag(self.neff),
'loss':self.loss,
'm0':self.m0}
tdata['tneff'] = misc.format_complex_latex(self.neff)
Plotter.title( title % tdata )
return plotcoord
def info(self, wg=None):
"""Print information about the current mode, if the waveguide
is given as an argument more information will be given
"""
info_str = self.__str__().decode('utf8') + "\n"
info_str += u" | Effective area: %.5g μm²\n" % self.effective_area()
info_str += u" | Spot size: %.5g μm\n" %(self.spot_size())
info_str += u" | Single moded numerical aperture: %.5g\n" %(self.numerical_aperture())
if self.is_spurious:
info_str += " | Possible spurious mode\n"
#Print extra information if wg given
if wg:
rc = wg.core_size
info_str += " | Group index: %s m/s\n" % self.group_index(wg)
info_str += " | Mode class: %s\n" % self.estimate_class(wg)
info_str += " | Power in core: %.5g%%\n" % (100*self.mode_power(r=rc)/self.mode_power())
print utf8out(info_str)
# -------------------------------------------------------------------
# Misc functions for 2 Modes
# -------------------------------------------------------------------
def __cmp__(self,other):
"Compare two Modes, based on eigenvalue"
if hasattr(other, "neff"):
return cmp(self.neff,other.neff)
else:
return cmp(self.neff,other)
def __mul__(self, other):
"Construct numerical innerproduct as this.L dot that.R:"
if hasattr(self,'right') and hasattr(other,'left'):
return sum(self.right*other.left)
else:
raise IndexError, "Modes do not have left & right members!"
def __str__(self):
info_dict = {}
info_dict['res'] = np.max(np.atleast_1d(self.residue))
info_dict['userlab'] = ", ".join(["%s: %s" % (x,self.label[x]) for x in self.label])
info_dict['m0'] = self.m0
info_dict['wl'] = self.wl
if self.coord is None:
info_dict['shape'] = info_dict['symm'] = info_dict['rmax'] = "?"
else:
info_dict['shape'] = self.coord.shape
info_dict['rmin'], info_dict['rmax'] = self.coord.polar_bounds()[:2]
info_dict['symm'] = "C%d" % self.symmetry
#Construct information string
info_str = u"Mode, size: %(shape)s, symmetry: %(symm)s, m₀: %(m0)d\n" % info_dict
info_str += u"λ: %(wl).4g, r: %(rmin).3g -> %(rmax).3g, res: %(res).2g\n" % info_dict
info_str += u"neff=%s, loss=%.4gdB/m, %s" % (misc.format_complex(self.neff), self.loss, info_dict['userlab'])
return utf8out(info_str)
def __repr__(self):
res = np.max(np.atleast_1d(self.residue))
slab = ("","S")[self.is_spurious]
clab = ("E","")[self.is_converged()]
userlab = ", ".join(["%s: %s" % (x,self.label[x]) for x in self.label])
info_str = u"<%s: m₀=%d λ=%.4g neff=%s r:%.2g %s [%s%s]>" \
% (self.__class__.__name__, self.m0, self.wl, \
misc.format_complex(self.neff), res, userlab, slab, clab)
return utf8out(info_str)
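#Hedged sketch (not part of the original module): the eigenvalue/effective
#index/loss relations used by Mode above, exercised on a bare instance with no
#field data. Here β = neff·k₀ and loss = 2×10⁷ Im{β}/ln(10) dB/m.
def _mode_eigenvalue_example(wl=1.55, neff=1.44 + 1e-6j):
    m = Mode(wl=wl, evalue=(neff*2*pi/wl)**2)
    return m.neff, m.beta, m.loss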
# +-----------------------------------------------------------------------+
# | Mode class for the Scalar Wave Equation
# +-----------------------------------------------------------------------+
class ScalarMode(Mode):
pass
# +-----------------------------------------------------------------------+
# | Mode class for the Fourier Decomposition Method
# +-----------------------------------------------------------------------+
#class FourierMode(Mode):
class VectorMode(Mode):
pmax = 2
r_axis=-2
az_axis=-1
pv = np.array([1, -1])
reverse_left = False
reverse_right = False
reverse_p = False
def copy(self, link_fields=False):
"""
Create new copy of mode.
link_vectors: share the mode information between modes
to save memory
"""
newmode = Mode.copy(self, link_fields)
#Copy vectors or link them
if link_fields or self.right is None:
newmode.right = self.right
else:
newmode.right = self.right.copy()
if link_fields or self.left is None:
newmode.left = self.left
else:
newmode.left = self.left.copy()
return newmode
# +-----------------------------------------------------------------------+
# | Pickling marshalling functions
# | Compress the mode before saving if required
# +-----------------------------------------------------------------------+
def __getstate__(self):
"Pickle all needed data, ignore cached data"
state = self.__dict__.copy()
ignore_list = ["Mdtn"]
for ignore in ignore_list:
if ignore in state:
del state[ignore]
if not self.store_field:
state['left'] = state['right'] = None
return state
def __setstate__(self,state):
"Restore pickled data"
self.__dict__.update(state)
# -------------------------------------------------------------------
# Vector access routines
# -------------------------------------------------------------------
def add_extensions(self, bc_ext=-3, bc_int=2):
"""
Set up the extensions to model the fields
at locations below r_min and above r_max
"""
if hasattr(self,'right') and self.right is not None:
self.aright_ext = AnalyticExtension(bc_ext)
self.aright_int = AnalyticExtension(bc_int, interior=1)
self.aright_ext.set_extension_mode(self)
self.aright_int.set_extension_mode(self)
if hasattr(self,'left') and self.left is not None:
self.aleft_ext = AnalyticExtensionLeft(bc_ext)
self.aleft_int = AnalyticExtensionLeft(bc_int, interior=1)
self.aleft_ext.set_extension_mode(self)
self.aleft_int.set_extension_mode(self)
def compress(self, size, wg=None):
"Replace the calculated vectors with resampled versions"
astype = np.complex64
if size is None or size==self.coord.shape:
logging.warning("Compressing to same size as mode, doing nothing")
return
newcoord = wg.get_coord(size, border=1)
newshape = (newcoord.Nr, newcoord.Naz, self.pmax)
#Resample the right mode vector if there is one
right = self.right
left = self.left
if right is not None:
right = self.shape_vector(right).transpose((2,0,1))
right = newcoord.fourier_resample(right, self.coord, fourier=True)
right = self.unshape_vector(right.transpose((1,2,0))).astype(astype)
#Resample the left mode vector if there is one
if left is not None:
left = self.shape_vector(left).transpose((2,0,1))
left = newcoord.fourier_resample(left, self.coord, fourier=True)
left = self.unshape_vector(left.transpose((1,2,0))).astype(astype)
self.coord = newcoord
self.left = left
self.right = right
self.add_extensions()
def get_right(self, swap=False):
"""
Return the right eigenvector of the mode.
swap: swap the Fourier frequencies m⟶-m
"""
if self.right is None:
raise RuntimeError, "No right eigenvector is available!"
#Shape the vector correctly
mdata = np.rollaxis(self.shape_vector(self.right),2)
if swap:
mdata = _swap_vector(mdata, self.az_axis, 1)
return mdata
def get_left(self, field=True, swap=False):
"""
Return the left eigenvector of the mode.
field: return mode corrected to electric field of mode
swap: swap the Fourier frequencies m⟶-m
"""
if self.left is None:
raise RuntimeError, "No left eigenvector is available!"
#Shape the vector correctly
mdata = np.rollaxis(self.shape_vector(self.left),2)
#'Fix' up left field as electric field
# so E+ = L+/r, E- = -L-/r
if field:
mdata = mdata/self.coord.rv[:,newaxis]
mdata *= self.pv[:,newaxis,newaxis]
if self.reverse_p:
mdata = np.conj(mdata)
mdata = mdata[::-1]
if swap:
mdata = _swap_vector(mdata, self.az_axis, 1)
return mdata
def transform_left(self, factor=1):
if self.reverse_p:
self.left *= np.conj(factor)
else:
self.left *= factor
def transform_right(self, factor=1):
self.right *= factor
def shape_vector(self,v):
"Return vector shaped as (r,phi,p)"
shape = self.coord.shape+(2,)
v = v.reshape(shape)
return v
def unshape_vector(self,v):
"Return vector shaped for storage"
return v.ravel()
@property
def shape(self):
shape = self.coord.shape+(self.pmax,)
return shape
def discard_fields(self):
"Discard all field information"
self.left = None
self.right = None
self.aleft_int = self.aleft_ext = None
self.aright_int = self.aright_ext = None
def normalize(self, by='field', arg=0, wg=None, coord=None):
"""
Normalize the fields so the electric and magnetic vectors have the correct
correspondance, including the intrinsic impedence of free space:
H = Htrue, E = (e0/mu0)^1/2 Etrue
"""
if self.coord is None or self.right is None: return
#Normalize largest component of magnetic transverse vector to be real
#ht = self.magnetic_transverse_field(fourier=1)
#self.right /= exp(1j*arg)*misc.absmax(ht)/abs(misc.absmax(ht))
if self.left is None: return
#Normalize E/H fields:
P0 = self.mode_power(coord=coord)
enorm = None
if by=='poynting':
enorm = np.conj(misc.absmax(self.poynting(coord=coord)))
elif by=='ext':
self.add_extensions()
enorm = self._normalize_electric_field_extension()
elif by=='field':
enorm = self._normalize_electric_field(wg=wg,coord=coord)
#Otherwise just use the power
if enorm is None or not np.isfinite(enorm):
enorm = 1./P0
#Normalize absolute power so |P|=1
#Can't normalize power phase and field relationship simultaneously
b = sp.sqrt(np.abs(P0*enorm))
self.transform_right(1/b)
self.transform_left(enorm/b)
#Update analytic extensions
self.add_extensions()
        #Recalculate power & field for information
Pang = np.angle(self.mode_power(coord=coord))
logging.debug(u"Normalized mode to power angle ∠%.3gπ" % (Pang/pi))
return enorm
def _normalize_electric_field(self, wg, fourier=True, coord=None):
"""
Normalize Electric/Magnetic components so they have the correct relationship
using the analytic extension
H = Htrue, E = (e0/mu0)^1/2 Etrue
"""
        #Select the internal nodes only (the electric vector will be incorrect
        #at the boundary nodes)
E = self.electric_transverse_field(fourier=fourier, coord=coord)[:,2:-2]
Ec = self.calculated_electric_field(wg=wg, fourier=fourier, coord=coord)[:2,2:-2]
xselect = np.absolute(E).argmax()
enorm = np.array(Ec).flat[xselect]/np.array(E).flat[xselect]
return enorm
def _normalize_electric_field_extension(self):
"""
Normalize Electric/Magnetic components so they have the correct relationship
using the analytic extension
H = Htrue, E = (e0/mu0)^1/2 Etrue
"""
ap,am = self.aleft_ext.alphac
bp,bm = self.aright_ext.alphac
#The field coefficients should be such that this is one
chi = (bp-bm)/(am+ap)/(1j*self.beta/self.k0)
w = np.abs(bp-bm)**2+np.abs(am+ap)**2
#Check that we aren't comparing two zero numbers
if np.mean(w)<1e-10:
logging.error("Extension norm failed!")
return None
#Calculate the weighted average if more than one Fourier component
if np.shape(w)[0]>1:
#Select only low frequency components
msl = np.absolute(self.aright_ext.msx).max(axis=0)<20
bnorm = np.trapz(chi[msl]*w[msl])/np.trapz(w[msl])
else:
bnorm = chi
return bnorm
def _resample_vector(self, v, ext=(None,None), fourier=False, cartesian=None, coord=None):
"Resample raw vectors to new coordinate"
#Resample if coord is given and is not self.coord
if coord and coord!=self.coord:
v = coord.fourier_resample(v, self.coord, ext=ext, m0=self.m0, fourier=fourier)
elif not fourier:
v = fft.ifft(v, axis=self.az_axis)*self.floquet()
#Convert to cartesian if specified
if coord is None: coord=self.coord
if cartesian is None: cartesian = not coord.polar_coordinate
if cartesian: v = coordinates.vector_polar_to_cartesian(v, coord)
return v
def calculated_electric_field(self, wg=None, fourier=False, cartesian=False, coord=None):
"The transverse electric vector calculated from the internal H field"
if wg is None:
logging.warning("CEF Electric field calculation being approximated as no waveguide available")
n2 = self.interior_index**2
else:
n2 = wg.index2(wl=self.wl, coord=self.coord)
hr,hphi,hz = self.magnetic_field(fourier=1)
Dhzr,Dhzphi = self.coord.grad_t(hz, m0=self.m0, fourier=1)
er = fft.ifft(self.beta*hphi+1j*Dhzphi, axis=1)/(self.k0*n2)
ephi = -fft.ifft(self.beta*hr+1j*Dhzr, axis=1)/(self.k0*n2)
ez = fft.ifft(1j*self.coord.curl_t((hr,hphi), fourier=1, m0=self.m0))/(self.k0*n2)
e = np.array([er,ephi,ez])
e = self._resample_vector(fft.fft(e,axis=-1), fourier=fourier, cartesian=cartesian, coord=coord)
return e
def store_calculated_electric_field(self, wg=None, force=False):
'''
Calculate the electric vector from the internal H field
and save it as the internal electric vector
'''
if not (self.left is None or force):
return
logging.debug("Calculating electric field")
if wg is None:
logging.warning("Electric field calculation being approximated as no waveguide available")
n2 = self.interior_index**2
else:
n2 = wg.index2(wl=self.wl, coord=self.coord)
hr,hphi,hz = self.magnetic_field(fourier=1)
Dhzr,Dhzphi = self.coord.grad_t(hz, m0=self.m0, fourier=1)
        #Calculate transverse electric field from Maxwell's equations
er = fft.ifft(self.beta*hphi+1j*Dhzphi, axis=1)/(self.k0*n2)
ephi = -fft.ifft(self.beta*hr+1j*Dhzr, axis=1)/(self.k0*n2)
#Calculate vector
ev = np.array([er+ephi*1j,er-ephi*1j])*self.coord.rv[:,newaxis]/self.pv[:,newaxis,newaxis]
ev = fft.fft(ev,axis=-1)
#Reverse if called for
if self.reverse_left:
ev = _swap_vector(ev, -1, 1)
self.left = self.unshape_vector(ev.transpose((1,2,0)))
def magnetic_transverse_field(self, fourier=False, cartesian=None, coord=None):
'''
        The transverse magnetic field, calculated from the internal H⁺,H⁻
cartesian=False returns h_t=(h_r,h_ϕ)
cartesian=True returns h_t=(h_x,h_y)
'''
hp,hm = self.get_right(swap=self.reverse_right)
ht = np.asarray([(hp+hm)/2, (hp-hm)/2j])
ext = (self.aright_int, self.aright_ext)
ht = self._resample_vector(ht, ext, fourier, cartesian, coord)
return ht
def magnetic_field(self, fourier=False, cartesian=None, coord=None):
"""
The three component magnetic field (h_r,h_ϕ,h_z) or (hx,hy,hz)
"""
hr,ha = self.magnetic_transverse_field(fourier=1)
hz = 1j/self.beta * self.coord.div_t((hr,ha), fourier=1, m0=self.m0)
h = np.asarray([hr,ha,hz])
ext = (self.aright_int, self.aright_ext)
h = self._resample_vector(h, ext, fourier, cartesian, coord)
return h
def electric_transverse_field(self, fourier=False, cartesian=None, coord=None, wg=None):
'''
        The transverse electric field, calculated from the internal E⁺,E⁻
cartesian=False returns e_t=(e_r,e_ϕ)
cartesian=True returns e_t=(e_x,e_y)
        If no electric field data is stored it is calculated from the magnetic field
'''
        if self.left is None:
            #No stored electric field; calculate it from H and store it
            self.store_calculated_electric_field(wg)
ep,em = self.get_left(field=1, swap=self.reverse_left)
et = np.asarray([(ep+em)/2, (ep-em)/2j])
ext = (self.aleft_int, self.aleft_ext)
et = self._resample_vector(et, ext, fourier, cartesian, coord)
return et
def electric_field(self, wg=None, fourier=False, cartesian=None, coord=None):
"""
The three component electric field (e_r,e_ϕ,e_z)
        If no electric field data is stored it is calculated from the magnetic field
"""
        if self.left is None:
            #No stored electric field; calculate it from H and store it
            self.store_calculated_electric_field(wg)
if wg is None:
logging.warning("Electric field calculation being approximated as no waveguide available")
n2 = self.interior_index**2
else:
n2 = wg.index2(wl=self.wl, coord=self.coord)
hr,ha = self.magnetic_transverse_field(fourier=1)
er,ea = self.electric_transverse_field(fourier=1)
ez = fft.fft(1j/(self.k0*n2) * fft.ifft(self.coord.curl_t((hr,ha), fourier=1, m0=self.m0), axis=1),axis=1)
e = np.asarray([er,ea,ez])
ext = (self.aleft_int, self.aleft_ext)
e = self._resample_vector(e, ext, fourier, cartesian, coord)
return e
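#Hedged sketch (not part of the original module): the circular-to-transverse
#conversion used in VectorMode.magnetic_transverse_field, h_r = (h⁺+h⁻)/2 and
#h_ϕ = (h⁺-h⁻)/2j, round-tripped on toy arrays.
def _circular_component_example():
    hp = np.array([1.0 + 0.0j, 0.5j])
    hm = np.array([0.0 + 1.0j, -0.5j])
    hr, hphi = (hp + hm)/2, (hp - hm)/2j
    return np.allclose(hr + 1j*hphi, hp) and np.allclose(hr - 1j*hphi, hm)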
# +-----------------------------------------------------------------------+
#
# A cheap way to create linear combinations of modal fields
# without worrying about the technical details
#
# +-----------------------------------------------------------------------+
class CombinedVectorMode(VectorMode):
def __init__(self, modelist, coeffs, **kwargs):
VectorMode.__init__(self, **kwargs)
self.modes = modelist
self.fieldcoeffs = coeffs
def magnetic_transverse_field(self, *args, **kwargs):
'''
The transverse magnetic field, calculated from combined modes
cartesian=False returns h_t=(h_r,h_ϕ)
cartesian=True returns h_t=(h_x,h_y)
'''
ht=None
for m,c in zip(self.modes, self.fieldcoeffs):
if ht is None:
ht = c*m.magnetic_transverse_field(*args, **kwargs)
else:
ht += c*m.magnetic_transverse_field(*args, **kwargs)
return ht
def electric_transverse_field(self, *args, **kwargs):
'''
        The transverse electric field, calculated from the combined modes
cartesian=False returns e_t=(e_r,e_ϕ)
cartesian=True returns e_t=(e_x,e_y)
'''
et=None
for m,c in zip(self.modes, self.fieldcoeffs):
if et is None:
et = c*m.electric_transverse_field(*args, **kwargs)
else:
et += c*m.electric_transverse_field(*args, **kwargs)
return et
def magnetic_field(self, *args, **kwargs):
"The three component magnetic field (h_r,h_ϕ,h_z) or (hx,hy,hz)"
h=None
for m,c in zip(self.modes, self.fieldcoeffs):
if h is None:
h = c*m.magnetic_field(*args, **kwargs)
else:
h += c*m.magnetic_field(*args, **kwargs)
return h
def electric_field(self, *args, **kwargs):
"The three component electric field (e_r,e_ϕ,e_z)"
e=None
for m,c in zip(self.modes, self.fieldcoeffs):
if e is None:
e = c*m.electric_field(*args, **kwargs)
else:
e += c*m.electric_field(*args, **kwargs)
return e
# +-----------------------------------------------------------------------+
#
# Class for calculating the mode in the region beyond
# the computational boundary.
#
# +-----------------------------------------------------------------------+
class AnalyticExtension:
'''
Class for modeling the mode behaviour in the region beyond the computational boundary.
'''
def __init__(self, bcnode=-1, factor=1, interior=False):
self.gamma = 0
self.bc_node = bcnode
self.interior = interior
self.factor=factor
if interior:
self.bcoeffs = (0,0,1)
else:
self.bcoeffs = (1,0,0)
def bessel(self, m, x):
y = 0
if self.bcoeffs[0]!=0:
y += self.bcoeffs[0]*hankel1(m,x)
if self.bcoeffs[1]!=0:
y += self.bcoeffs[1]*hankel1(m,x)
if self.bcoeffs[2]!=0:
y += self.bcoeffs[2]*jv(m,x)
#Make sure we don't get NaN's for large orders
np.place(y, np.isnan(y), 0)
return y
def set_extension_mode(self, mode):
self.factor = mode.beta
if self.interior: gamma = mode.gamma_int
else: gamma = mode.gamma_ext
mdata = mode.get_right(swap=mode.reverse_right)
return self.set_extension(mdata, mode.m0, mode.pv, mode.coord, gamma)
def set_extension(self, mdata, m0, p, coord, gamma):
self.rc = coord.rv[self.bc_node]
self.ms, self.phi = coord.ms, coord.phiv
self.m0, self.p = m0, p
self.msx = self.ms + m0 + p[:,newaxis]
#Take the points (in phi and p) at the correct r location
#Note, need to copy it here, not have it linked to the main array
self.mdata = mdata[...,self.bc_node,:] + 0
self.gamma = gamma
return self
#Calculate analytic coefficients using the value of the mode at the boundary
def calc_alphac(self, gamma=None):
if gamma is None: gamma = self.gamma
b = self.bessel(self.msx, gamma*self.rc)
bsel = abs(b)>1e-12
alphac = np.zeros_like(self.mdata)
alphac[bsel] = self.mdata[bsel]/b[bsel]
return alphac
alphac = property(calc_alphac)
def __call__(self, rext, fourier=True):
"Return external fields calculated at r radial coordinates"
ms = self.msx[...,newaxis]
ext = self.alphac[...,newaxis]*self.bessel(ms,self.gamma*rext)
ext = np.rollaxis(ext,2)
if fourier:
return ext
else:
extp = fft.ifft(ext, axis=-1)
return extp*np.exp(1j*self.m0*self.phi)
def vector(self, rext, fourier=True):
"Return r,phi,z vectors reconstructed from +/- coefficients, factor=beta"
msp,msm = self.msx
ap, am = self.alphac
hr = 0.5*(ap*self.bessel(msp, self.gamma*rext) + am*self.bessel(msm, self.gamma*rext))
hp = -0.5j*(ap*self.bessel(msp, self.gamma*rext) - am*self.bessel(msm, self.gamma*rext))
hz = 1j*self.gamma/self.factor/2*(ap-am)*self.bessel(self.ms, self.gamma*rext)
return np.asarray([hr,hp,hz])
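#Hedged sketch (not part of the original module): the matching rule behind
#AnalyticExtension for an exterior field, assuming hankel1 above is the usual
#outgoing Hankel function. The coefficient alpha is fixed so the tail equals
#the boundary value at r=rc; m, gamma and rc below are arbitrary illustrations.
def _analytic_extension_example(m=1, gamma=0.5 + 0.05j, rc=10.0, boundary_value=1.0):
    alpha = boundary_value/hankel1(m, gamma*rc)
    rext = np.linspace(rc, 2*rc, 5)
    return alpha*hankel1(m, gamma*rext)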
class AnalyticExtensionLeft(AnalyticExtension):
def set_extension_mode(self, mode):
if self.interior:
gamma = mode.gamma_int
self.factor = mode.k0*mode.interior_index**2
else:
gamma = mode.gamma_ext
self.factor = mode.k0*mode.exterior_index**2
mdata =mode.get_left(field=True, swap=mode.reverse_left)
return self.set_extension(mdata, mode.m0, mode.pv, mode.coord, gamma)
def vector(self, rext, fourier=True):
"Return r,phi,z vectors reconstructed from +/- coefficients, factor=k n^2"
msp, msm = self.msx
ap, am = self.alphac
er = 0.5*(ap*self.bessel(msp, self.gamma*rext) + am*self.bessel(msm, self.gamma*rext))
ep = -0.5j*(ap*self.bessel(msp, self.gamma*rext) - am*self.bessel(msm, self.gamma*rext))
ez = 1j*self.gamma/self.factor/2*(ap-am)*self.bessel(self.ms, self.gamma*rext)
return np.asarray([er,ep,ez])
|
gpl-3.0
|
CameronTEllis/brainiak
|
brainiak/funcalign/sssrm.py
|
3
|
29287
|
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semi-Supervised Shared Response Model (SS-SRM)
The implementations are based on the following publications:
.. [Turek2016] "A Semi-Supervised Method for Multi-Subject fMRI Functional
Alignment",
J. S. Turek, T. L. Willke, P.-H. Chen, P. J. Ramadge
IEEE International Conference on Acoustics, Speech and Signal Processing
(ICASSP), 2017, pp. 1098-1102.
https://doi.org/10.1109/ICASSP.2017.7952326
"""
# Authors: Javier Turek (Intel Labs), 2016
import logging
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin
from sklearn.utils import assert_all_finite
from sklearn.utils.validation import NotFittedError
from sklearn.utils.multiclass import unique_labels
import theano
import theano.tensor as T
import theano.compile.sharedvalue as S
from pymanopt.manifolds import Euclidean
from pymanopt.manifolds import Product
from pymanopt.solvers import ConjugateGradient
from pymanopt import Problem
from pymanopt.manifolds import Stiefel
import gc
from brainiak.utils import utils
from brainiak.funcalign import srm
__all__ = [
"SSSRM",
]
logger = logging.getLogger(__name__)
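# Hedged usage sketch (not part of the original module): synthetic data for two
# subjects run through fit/transform/predict. Shapes follow the docstrings
# below; all names and sizes here are illustrative only.
def _sssrm_usage_example(voxels=(30, 40), n_align=60, n_sup=20, n_features=5):
    rng = np.random.RandomState(0)
    X = [rng.randn(v, n_align) for v in voxels]            # alignment data
    Z = [rng.randn(v, n_sup) for v in voxels]              # labeled data
    y = [rng.randint(0, 2, size=n_sup) for _ in voxels]    # class labels
    model = SSSRM(n_iter=2, features=n_features, gamma=1.0, alpha=0.5)
    model.fit(X, y, Z)
    return model.transform(X), model.predict(Z)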
class SSSRM(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Semi-Supervised Shared Response Model (SS-SRM)
Given multi-subject data, factorize it as a shared response S among all
subjects and an orthogonal transform W per subject, using also labeled
data to train a Multinomial Logistic Regression (MLR) classifier (with
l2 regularization) in a semi-supervised manner:
.. math::
(1-\\alpha) Loss_{SRM}(W_i,S;X_i)
        + \\alpha/\\gamma Loss_{MLR}(\\theta, bias; \\{(W_i^T \\times Z_i, y_i)\\})
+ R(\\theta)
:label: sssrm-eq
(see Equations (1) and (4) in [Turek2016]_).
Parameters
----------
n_iter : int, default: 10
Number of iterations to run the algorithm.
features : int, default: 50
Number of features to compute.
gamma : float, default: 1.0
Regularization parameter for the classifier.
alpha : float, default: 0.5
Balance parameter between the SRM term and the MLR term.
rand_seed : int, default: 0
Seed for initializing the random number generator.
Attributes
----------
w_ : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) for each subject.
s_ : array, shape=[features, samples]
The shared response.
theta_ : array, shape=[classes, features]
The MLR class plane parameters.
bias_ : array, shape=[classes]
The MLR class biases.
classes_ : array of int, shape=[classes]
Mapping table for each classes to original class label.
random_state_: `RandomState`
Random number generator initialized using rand_seed
Note
----
The number of voxels may be different between subjects. However, the
number of samples for the alignment data must be the same across
subjects. The number of labeled samples per subject can be different.
The Semi-Supervised Shared Response Model is approximated using the
Block-Coordinate Descent (BCD) algorithm proposed in [Turek2016]_.
This is a single node version.
"""
def __init__(self, n_iter=10, features=50, gamma=1.0, alpha=0.5,
rand_seed=0):
self.n_iter = n_iter
self.features = features
self.gamma = gamma
self.alpha = alpha
self.rand_seed = rand_seed
return
def fit(self, X, y, Z):
"""Compute the Semi-Supervised Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
y : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in Z.
Z : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
for training the MLR classifier.
"""
logger.info('Starting SS-SRM')
# Check that the alpha value is in range (0.0,1.0)
if 0.0 >= self.alpha or self.alpha >= 1.0:
raise ValueError("Alpha parameter should be in range (0.0, 1.0)")
# Check that the regularizer value is positive
if 0.0 >= self.gamma:
raise ValueError("Gamma parameter should be positive.")
# Check the number of subjects
if len(X) <= 1 or len(y) <= 1 or len(Z) <= 1:
raise ValueError("There are not enough subjects in the input "
"data to train the model.")
if not (len(X) == len(y)) or not (len(X) == len(Z)):
raise ValueError("Different number of subjects in data.")
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs for alignment
# and if alignment and classification data have the same number of
        # voxels per subject. Also check that there are labels for all the
        # classification samples.
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
assert_all_finite(Z[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of alignment samples "
"between subjects.")
if X[subject].shape[0] != Z[subject].shape[0]:
raise ValueError("Different number of voxels between alignment"
" and classification data (subject {0:d})"
".".format(subject))
if Z[subject].shape[1] != y[subject].size:
raise ValueError("Different number of samples and labels in "
"subject {0:d}.".format(subject))
# Map the classes to [0..C-1]
new_y = self._init_classes(y)
# Run SS-SRM
self.w_, self.s_, self.theta_, self.bias_ = self._sssrm(X, Z, new_y)
return self
def _init_classes(self, y):
"""Map all possible classes to the range [0,..,C-1]
Parameters
----------
y : list of arrays of int, each element has shape=[samples_i,]
Labels of the samples for each subject
Returns
-------
new_y : list of arrays of int, each element has shape=[samples_i,]
Mapped labels of the samples for each subject
Note
----
The mapping of the classes is saved in the attribute classes_.
"""
self.classes_ = unique_labels(utils.concatenate_not_none(y))
new_y = [None] * len(y)
for s in range(len(y)):
new_y[s] = np.digitize(y[s], self.classes_) - 1
return new_y
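    @staticmethod
    def _class_mapping_example():
        """Hedged sketch (not part of the original class): the np.digitize
        trick used in _init_classes, mapping labels [2, 5, 9] onto [0, 1, 2].
        """
        classes = np.array([2, 5, 9])        # plays the role of classes_
        y = np.array([5, 2, 9, 9])
        return np.digitize(y, classes) - 1   # -> array([1, 0, 2, 2])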
def transform(self, X, y=None):
"""Use the model to transform matrix to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
note that number of voxels and samples can vary across subjects.
y : not used as it only applies the mappings
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
s[subject] = self.w_[subject].T.dot(X[subject])
return s
def predict(self, X):
"""Classify the output for given data
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
The number of voxels should be according to each subject at
the moment of training the model.
Returns
-------
p: list of arrays, element i has shape=[samples_i]
Predictions for each data sample.
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
X_shared = self.transform(X)
p = [None] * len(X_shared)
for subject in range(len(X_shared)):
sumexp, _, exponents = utils.sumexp_stable(
self.theta_.T.dot(X_shared[subject]) + self.bias_)
p[subject] = self.classes_[
(exponents / sumexp[np.newaxis, :]).argmax(axis=0)]
return p
def _sssrm(self, data_align, data_sup, labels):
"""Block-Coordinate Descent algorithm for fitting SS-SRM.
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
"""
classes = self.classes_.size
# Initialization:
self.random_state_ = np.random.RandomState(self.rand_seed)
random_states = [
np.random.RandomState(self.random_state_.randint(2**32))
for i in range(len(data_align))]
        # Set the Wi's to random orthogonal matrices (voxels by features)
w, _ = srm._init_w_transforms(data_align, self.features, random_states)
# Initialize the shared response S
s = SSSRM._compute_shared_response(data_align, w)
# Initialize theta and bias
theta, bias = self._update_classifier(data_sup, labels, w, classes)
# calculate and print the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup, labels,
w, s, theta, bias)
logger.info('Objective function %f' % objective)
# Main loop:
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# Update the mappings Wi
w = self._update_w(data_align, data_sup, labels, w, s, theta, bias)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating Wi %f'
% objective)
# Update the shared response S
s = SSSRM._compute_shared_response(data_align, w)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating S %f'
% objective)
# Update the MLR classifier, theta and bias
theta, bias = self._update_classifier(data_sup, labels, w, classes)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating MLR %f'
% objective)
return w, s, theta, bias
def _update_classifier(self, data, labels, w, classes):
"""Update the classifier parameters theta and bias
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of 2D array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
classes : int
The number of classes in the classifier.
Returns
-------
theta : array, shape=[features, classes]
The MLR parameter for the class planes.
bias : array shape=[classes,]
The MLR parameter for class biases.
"""
# Stack the data and labels for training the classifier
data_stacked, labels_stacked, weights = \
SSSRM._stack_list(data, labels, w)
features = w[0].shape[1]
total_samples = weights.size
data_th = S.shared(data_stacked.astype(theano.config.floatX))
val_ = S.shared(labels_stacked)
total_samples_S = S.shared(total_samples)
theta_th = T.matrix(name='theta', dtype=theano.config.floatX)
bias_th = T.col(name='bias', dtype=theano.config.floatX)
constf2 = S.shared(self.alpha / self.gamma, allow_downcast=True)
weights_th = S.shared(weights)
log_p_y_given_x = \
T.log(T.nnet.softmax((theta_th.T.dot(data_th.T)).T + bias_th.T))
f = -constf2 * T.sum((log_p_y_given_x[T.arange(total_samples_S), val_])
/ weights_th) + 0.5 * T.sum(theta_th ** 2)
manifold = Product((Euclidean(features, classes),
Euclidean(classes, 1)))
problem = Problem(manifold=manifold, cost=f, arg=[theta_th, bias_th],
verbosity=0)
solver = ConjugateGradient(mingradnorm=1e-6)
solution = solver.solve(problem)
theta = solution[0]
bias = solution[1]
del constf2
del theta_th
del bias_th
del data_th
del val_
del solver
del solution
return theta, bias
def _update_w(self, data_align, data_sup, labels, w, s, theta, bias):
"""
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
w : list of 2D array, element i has shape=[voxels_i, features]
The updated orthogonal transforms (mappings).
"""
subjects = len(data_align)
s_th = S.shared(s.astype(theano.config.floatX))
theta_th = S.shared(theta.T.astype(theano.config.floatX))
bias_th = S.shared(bias.T.astype(theano.config.floatX),
broadcastable=(True, False))
for subject in range(subjects):
logger.info('Subject Wi %d' % subject)
# Solve for subject i
# Create the theano function
w_th = T.matrix(name='W', dtype=theano.config.floatX)
data_srm_subject = \
S.shared(data_align[subject].astype(theano.config.floatX))
constf1 = \
S.shared((1 - self.alpha) * 0.5 / data_align[subject].shape[1],
allow_downcast=True)
f1 = constf1 * T.sum((data_srm_subject - w_th.dot(s_th))**2)
if data_sup[subject] is not None:
lr_samples_S = S.shared(data_sup[subject].shape[1])
data_sup_subject = \
S.shared(data_sup[subject].astype(theano.config.floatX))
labels_S = S.shared(labels[subject])
constf2 = S.shared(-self.alpha / self.gamma
/ data_sup[subject].shape[1],
allow_downcast=True)
log_p_y_given_x = T.log(T.nnet.softmax((theta_th.dot(
w_th.T.dot(data_sup_subject))).T + bias_th))
f2 = constf2 * T.sum(
log_p_y_given_x[T.arange(lr_samples_S), labels_S])
f = f1 + f2
else:
f = f1
# Define the problem and solve
f_subject = self._objective_function_subject(data_align[subject],
data_sup[subject],
labels[subject],
w[subject],
s, theta, bias)
minstep = np.amin(((10**-np.floor(np.log10(f_subject))), 1e-1))
manifold = Stiefel(w[subject].shape[0], w[subject].shape[1])
problem = Problem(manifold=manifold, cost=f, arg=w_th, verbosity=0)
solver = ConjugateGradient(mingradnorm=1e-2, minstepsize=minstep)
w[subject] = np.array(solver.solve(
problem, x=w[subject].astype(theano.config.floatX)))
if data_sup[subject] is not None:
del f2
del log_p_y_given_x
del data_sup_subject
del labels_S
del solver
del problem
del manifold
del f
del f1
del data_srm_subject
del w_th
del theta_th
del bias_th
del s_th
# Run garbage collector to avoid filling up the memory
gc.collect()
return w
@staticmethod
def _compute_shared_response(data, w):
""" Compute the shared response S
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
s : array, shape=[features, samples]
The shared response for the subjects data with the mappings in w.
"""
s = np.zeros((w[0].shape[1], data[0].shape[1]))
for m in range(len(w)):
s = s + w[m].T.dot(data[m])
s /= len(w)
return s
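    @staticmethod
    def _shared_response_example():
        """Hedged sketch (not part of the original class): the averaging rule
        above, S = (1/N) sum_i W_i^T X_i, evaluated on tiny random matrices
        with orthonormal-column W_i obtained from a QR factorization.
        """
        rng = np.random.RandomState(0)
        w = [np.linalg.qr(rng.randn(10, 3))[0] for _ in range(2)]
        data = [rng.randn(10, 7) for _ in range(2)]
        return SSSRM._compute_shared_response(data, w)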
def _objective_function(self, data_align, data_sup, labels, w, s, theta,
bias):
"""Compute the objective function of the Semi-Supervised SRM
See :eq:`sssrm-eq`.
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
f_val : float
The SS-SRM objective function evaluated based on the parameters to
this function.
"""
subjects = len(data_align)
# Compute the SRM loss
f_val = 0.0
for subject in range(subjects):
samples = data_align[subject].shape[1]
f_val += (1 - self.alpha) * (0.5 / samples) \
* np.linalg.norm(data_align[subject] - w[subject].dot(s),
'fro')**2
# Compute the MLR loss
f_val += self._loss_lr(data_sup, labels, w, theta, bias)
return f_val
def _objective_function_subject(self, data_align, data_sup, labels, w, s,
theta, bias):
"""Compute the objective function for one subject.
        .. math::
            (1-C) * Loss_{SRM_i}(W_i, S; X_i)
            + C/\\gamma * Loss_{MLR_i}(\\theta, bias; \\{(W_i^T Z_i, y_i)\\})
            + R(\\theta)
Parameters
----------
data_align : 2D array, shape=[voxels_i, samples_align]
Contains the fMRI data for alignment of subject i.
data_sup : 2D array, shape=[voxels_i, samples_i]
Contains the fMRI data of one subject for the classification task.
labels : array of int, shape=[samples_i]
The labels for the data samples in data_sup.
w : array, shape=[voxels_i, features]
The orthogonal transform (mapping) :math:`W_i` for subject i.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
f_val : float
The SS-SRM objective function for subject i evaluated on the
parameters to this function.
"""
# Compute the SRM loss
f_val = 0.0
samples = data_align.shape[1]
f_val += (1 - self.alpha) * (0.5 / samples) \
* np.linalg.norm(data_align - w.dot(s), 'fro')**2
# Compute the MLR loss
f_val += self._loss_lr_subject(data_sup, labels, w, theta, bias)
return f_val
def _loss_lr_subject(self, data, labels, w, theta, bias):
"""Compute the Loss MLR for a single subject (without regularization)
Parameters
----------
data : array, shape=[voxels, samples]
The fMRI data of subject i for the classification task.
labels : array of int, shape=[samples]
The labels for the data samples in data.
w : array, shape=[voxels, features]
The orthogonal transform (mapping) :math:`W_i` for subject i.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
loss : float
The loss MLR for the subject
"""
if data is None:
return 0.0
samples = data.shape[1]
thetaT_wi_zi_plus_bias = theta.T.dot(w.T.dot(data)) + bias
sum_exp, max_value, _ = utils.sumexp_stable(thetaT_wi_zi_plus_bias)
sum_exp_values = np.log(sum_exp) + max_value
aux = 0.0
for sample in range(samples):
label = labels[sample]
aux += thetaT_wi_zi_plus_bias[label, sample]
return self.alpha / samples / self.gamma * (sum_exp_values.sum() - aux)
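    # Minimal numpy sketch (added for clarity, not part of the original
    # module) of the stable log-sum-exp used above; utils.sumexp_stable is
    # assumed to return the column-wise sum of shifted exponentials and the
    # column-wise maximum, which together give log(sum(exp(scores)))
    # without overflow:
    #
    #   >>> import numpy as np
    #   >>> scores = np.array([[1000.0, 1.0], [1002.0, 3.0]])  # classes x samples
    #   >>> m = scores.max(axis=0)
    #   >>> lse = np.log(np.exp(scores - m).sum(axis=0)) + m
    #   >>> np.allclose(lse, [1002.126928, 3.126928])
    #   True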
def _loss_lr(self, data, labels, w, theta, bias):
"""Compute the Loss MLR (with the regularization)
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the samples in
data.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
loss : float
The loss MLR for the SS-SRM model
"""
subjects = len(data)
loss = 0.0
for subject in range(subjects):
if labels[subject] is not None:
loss += self._loss_lr_subject(data[subject], labels[subject],
w[subject], theta, bias)
return loss + 0.5 * np.linalg.norm(theta, 'fro')**2
@staticmethod
def _stack_list(data, data_labels, w):
"""Construct a numpy array by stacking arrays in a list
        Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
data_labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the samples in
data.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
data_stacked : 2D array, shape=[samples, features]
The data samples from all subjects are stacked into a single
2D array, where "samples" is the sum of samples_i.
labels_stacked : array, shape=[samples,]
The labels from all subjects are stacked into a single
array, where "samples" is the sum of samples_i.
weights : array, shape=[samples,]
The number of samples of the subject that are related to that
sample. They become a weight per sample in the MLR loss.
"""
labels_stacked = utils.concatenate_not_none(data_labels)
weights = np.empty((labels_stacked.size,))
data_shared = [None] * len(data)
curr_samples = 0
for s in range(len(data)):
if data[s] is not None:
subject_samples = data[s].shape[1]
curr_samples_end = curr_samples + subject_samples
weights[curr_samples:curr_samples_end] = subject_samples
data_shared[s] = w[s].T.dot(data[s])
curr_samples += data[s].shape[1]
data_stacked = utils.concatenate_not_none(data_shared, axis=1).T
return data_stacked, labels_stacked, weights
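    # Illustrative shapes (added for clarity, not part of the original
    # module), assuming the enclosing class is named SSSRM and that
    # utils.concatenate_not_none simply skips None entries: subjects with
    # 10 and 20 labelled samples and a 5-feature model give
    #
    #   >>> import numpy as np
    #   >>> rng = np.random.RandomState(0)
    #   >>> data = [rng.randn(40, 10), None, rng.randn(60, 20)]
    #   >>> labels = [rng.randint(0, 3, 10), None, rng.randint(0, 3, 20)]
    #   >>> w = [rng.randn(40, 5), None, rng.randn(60, 5)]
    #   >>> X, y, wts = SSSRM._stack_list(data, labels, w)
    #   >>> X.shape, y.shape, wts.shape
    #   ((30, 5), (30,), (30,))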
|
apache-2.0
|
MarcSpitz/ldebroux_kjadin_masters-thesis_2014
|
src/nx_pylab.py
|
1
|
27881
|
"""
**********
Matplotlib
**********
Draw networks with matplotlib.
See Also
--------
matplotlib: http://matplotlib.sourceforge.net/
pygraphviz: http://networkx.lanl.gov/pygraphviz/
"""
# Copyright (C) 2004-2012 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
__author__ = """Aric Hagberg ([email protected])"""
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell',
'draw_graphviz']
def draw(G, pos=None, ax=None, hold=None, **kwds):
"""Draw the graph G with Matplotlib.
Draw the graph as a simple representation with no node
labels or edge labels and using the full Matplotlib figure area
and no axis labels by default. See draw_networkx() for more
full-featured drawing that allows title, axis labels etc.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in specified Matplotlib axes.
hold : bool, optional
Set the Matplotlib hold state. If True subsequent draw
commands will be added to the current axes.
**kwds : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
See Also
--------
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
Notes
-----
This function has the same name as pylab.draw and pyplot.draw
so beware when using
>>> from networkx import *
since you might overwrite the pylab.draw function.
With pyplot use
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> plt.draw() # pyplot draw()
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
cf = plt.gcf()
else:
cf = ax.get_figure()
cf.set_facecolor('w')
if ax is None:
if cf._axstack() is None:
ax=cf.add_axes((0,0,1,1))
else:
ax=cf.gca()
# allow callers to override the hold state by passing hold=True|False
b = plt.ishold()
h = kwds.pop('hold', None)
if h is not None:
plt.hold(h)
try:
draw_networkx(G,pos=pos,ax=ax,**kwds)
ax.set_axis_off()
plt.draw_if_interactive()
except:
plt.hold(b)
raise
plt.hold(b)
return
def draw_networkx(G, pos=None, with_labels=True, **kwds):
"""Draw the graph G using Matplotlib.
Draw the graph with Matplotlib with options for node positions,
labeling, titles, and many other drawing features.
See draw() for simple drawing without labels or axes.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
with_labels : bool, optional (default=True)
Set to True to draw labels on the nodes.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional (default G.nodes())
Draw only specified nodes
edgelist : list, optional (default=G.edges())
Draw only specified edges
node_size : scalar or array, optional (default=300)
Size of nodes. If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats, (default='r')
Node color. Can be a single color format string,
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string, optional (default='o')
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
alpha : float, optional (default=1.0)
The node transparency
cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of nodes
vmin,vmax : float, optional (default=None)
Minimum and maximum for node colormap scaling
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
width : float, optional (default=1.0)
Line width of edges
edge_color : color string, or array of floats (default='r')
Edge color. Can be a single color format string,
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
    edge_cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional (default=None)
Minimum and maximum for edge colormap scaling
style : string, optional (default='solid')
Edge line style (solid|dashed|dotted,dashdot)
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int, optional (default=12)
Font size for text labels
font_color : string, optional (default='k' black)
Font color string
font_weight : string, optional (default='normal')
Font weight
font_family : string, optional (default='sans-serif')
Font family
label : string, optional
Label for graph legend
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
>>> import matplotlib.pyplot as plt
    >>> limits=plt.axis('off') # turn off axis
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if pos is None:
pos=nx.drawing.spring_layout(G) # default to spring layout
node_collection=draw_networkx_nodes(G, pos, **kwds)
edge_collection=draw_networkx_edges(G, pos, **kwds)
if with_labels:
draw_networkx_labels(G, pos, **kwds)
plt.draw_if_interactive()
def draw_networkx_nodes(G, pos,
nodelist=None,
node_size=300,
node_color='r',
node_shape='o',
alpha=1.0,
cmap=None,
vmin=None,
vmax=None,
ax=None,
linewidths=None,
label = None,
**kwds):
"""Draw the nodes of the graph G.
This draws only the nodes of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional
Draw only specified nodes (default G.nodes())
node_size : scalar or array
Size of nodes (default=300). If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats
Node color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8' (default='o').
alpha : float
The node transparency (default=1.0)
cmap : Matplotlib colormap
Colormap for mapping intensities of nodes (default=None)
vmin,vmax : floats
Minimum and maximum for node colormap scaling (default=None)
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
label : [None| string]
Label for legend
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=plt.gca()
if nodelist is None:
nodelist=G.nodes()
if not nodelist or len(nodelist)==0: # empty nodelist, no drawing
return None
try:
xy=numpy.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise nx.NetworkXError('Node %s has no position.'%e)
except ValueError:
raise nx.NetworkXError('Bad value in node positions.')
node_collection=ax.scatter(xy[:,0], xy[:,1],
s=node_size,
c=node_color,
marker=node_shape,
cmap=cmap,
vmin=vmin,
vmax=vmax,
alpha=alpha,
linewidths=linewidths,
label=label)
node_collection.set_zorder(2)
return node_collection
def draw_networkx_edges(G, pos,
edgelist=None,
width=1.0,
edge_color='k',
style='solid',
alpha=None,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=True,
label=None,
**kwds):
"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
edgelist : collection of edge tuples
Draw only specified edges(default=G.edges())
width : float
Line width of edges (default =1.0)
edge_color : color string, or array of floats
Edge color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string
Edge line style (default='solid') (solid|dashed|dotted,dashdot)
alpha : float
The edge transparency (default=1.0)
    edge_cmap : Matplotlib colormap
Colormap for mapping intensities of edges (default=None)
edge_vmin,edge_vmax : floats
Minimum and maximum for edge colormap scaling (default=None)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
label : [None| string]
Label for legend
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
from matplotlib.colors import colorConverter,Colormap
from matplotlib.collections import LineCollection
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=plt.gca()
if edgelist is None:
edgelist=G.edges()
if not edgelist or len(edgelist)==0: # no edges!
return None
# set edge positions
edge_pos=numpy.asarray([(pos[e[0]],pos[e[1]]) for e in edgelist])
if not cb.iterable(width):
lw = (width,)
else:
lw = width
if not cb.is_string_like(edge_color) \
and cb.iterable(edge_color) \
and len(edge_color)==len(edge_pos):
if numpy.alltrue([cb.is_string_like(c)
for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([colorConverter.to_rgba(c,alpha)
for c in edge_color])
elif numpy.alltrue([not cb.is_string_like(c)
for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if numpy.alltrue([cb.iterable(c) and len(c) in (3,4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must consist of either color names or numbers')
else:
if cb.is_string_like(edge_color) or len(edge_color)==1:
edge_colors = ( colorConverter.to_rgba(edge_color, alpha), )
else:
            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number of edges')
edge_collection = LineCollection(edge_pos,
colors = edge_colors,
linewidths = lw,
antialiaseds = (1,),
linestyle = style,
transOffset = ax.transData,
)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
# Note: there was a bug in mpl regarding the handling of alpha values for
# each line in a LineCollection. It was fixed in matplotlib in r7184 and
# r7189 (June 6 2009). We should then not set the alpha value globally,
# since the user can instead provide per-edge alphas now. Only set it
# globally if provided as a scalar.
if cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, Colormap))
edge_collection.set_array(numpy.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
arrow_collection=None
if G.is_directed() and arrows:
# a directed graph hack
# draw thick line segments at head end of edge
# waiting for someone else to implement arrows that will work
arrow_colors = edge_colors
a_pos=[]
# p=1.0-0.25 # make head segment 25 percent of edge length
p=1.0-0.05 # make head segment 5 percent of edge length
for src,dst in edge_pos:
x1,y1=src
x2,y2=dst
dx=x2-x1 # x offset
dy=y2-y1 # y offset
d=numpy.sqrt(float(dx**2+dy**2)) # length of edge
if d==0: # source and target at same position
continue
if dx==0: # vertical edge
xa=x2
ya=dy*p+y1
            elif dy==0: # horizontal edge
ya=y2
xa=dx*p+x1
else:
theta=numpy.arctan2(dy,dx)
xa=p*d*numpy.cos(theta)+x1
ya=p*d*numpy.sin(theta)+y1
a_pos.append(((xa,ya),(x2,y2)))
arrow_collection = LineCollection(a_pos,
colors = arrow_colors,
linewidths = [4*ww for ww in lw],
antialiaseds = (1,),
transOffset = ax.transData,
)
# commented st. edges don't go behind nodes
# arrow_collection.set_zorder(1) # edges go behind nodes
arrow_collection.set_label(label)
ax.add_collection(arrow_collection)
# update view
minx = numpy.amin(numpy.ravel(edge_pos[:,:,0]))
maxx = numpy.amax(numpy.ravel(edge_pos[:,:,0]))
miny = numpy.amin(numpy.ravel(edge_pos[:,:,1]))
maxy = numpy.amax(numpy.ravel(edge_pos[:,:,1]))
w = maxx-minx
h = maxy-miny
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
ax.update_datalim( corners)
ax.autoscale_view()
# if arrow_collection:
return edge_collection
def draw_networkx_labels(G, pos,
labels=None,
font_size=12,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
ax=None,
**kwds):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_family : string
Font family (default='sans-serif')
font_weight : string
Font weight (default='normal')
alpha : float
The text transparency (default=1.0)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=plt.gca()
if labels is None:
labels=dict( (n,n) for n in G.nodes())
# set optional alignment
horizontalalignment=kwds.get('horizontalalignment','center')
verticalalignment=kwds.get('verticalalignment','center')
text_items={} # there is no text collection so we'll fake one
for n, label in labels.items():
(x,y)=pos[n]
if not cb.is_string_like(label):
label=str(label) # this will cause "1" and 1 to be labeled the same
t=ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform = ax.transData,
clip_on=True,
)
text_items[n]=t
return text_items
def draw_networkx_edge_labels(G, pos,
edge_labels=None,
label_pos=0.5,
font_size=10,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
rotate=True,
**kwds):
"""Draw edge labels.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
alpha : float
The text transparency (default=1.0)
edge_labels : dictionary
Edge labels in a dictionary keyed by edge two-tuple of text
labels (default=None). Only labels for the keys in the dictionary
are drawn.
label_pos : float
Position of edge label along edge (0=head, 0.5=center, 1=tail)
font_size : int
        Font size for text labels (default=10)
font_color : string
Font color string (default='k' black)
font_weight : string
Font weight (default='normal')
font_family : string
Font family (default='sans-serif')
bbox : Matplotlib bbox
Specify text box shape and colors.
clip_on : bool
Turn on clipping at axis boundaries (default=True)
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=plt.gca()
if edge_labels is None:
labels=dict( ((u,v), d) for u,v,d in G.edges(data=True) )
else:
labels = edge_labels
text_items={}
for (n1,n2), label in labels.items():
(x1,y1)=pos[n1]
(x2,y2)=pos[n2]
(x,y) = (x1 * label_pos + x2 * (1.0 - label_pos),
y1 * label_pos + y2 * (1.0 - label_pos))
if rotate:
angle=numpy.arctan2(y2-y1,x2-x1)/(2.0*numpy.pi)*360 # degrees
# make label orientation "right-side-up"
if angle > 90:
angle-=180
if angle < - 90:
angle+=180
# transform data coordinate angle to screen coordinate angle
xy=numpy.array((x,y))
trans_angle=ax.transData.transform_angles(numpy.array((angle,)),
xy.reshape((1,2)))[0]
else:
trans_angle=0.0
# use default box of white with white border
if bbox is None:
bbox = dict(boxstyle='round',
ec=(1.0, 1.0, 1.0),
fc=(1.0, 1.0, 1.0),
)
if not cb.is_string_like(label):
label=str(label) # this will cause "1" and 1 to be labeled the same
# set optional alignment
horizontalalignment=kwds.get('horizontalalignment','center')
verticalalignment=kwds.get('verticalalignment','center')
t=ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation=trans_angle,
transform = ax.transData,
bbox = bbox,
zorder = 1,
clip_on=True,
)
text_items[(n1,n2)]=t
return text_items
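# Illustrative usage (added for clarity, not part of the original module):
# passing an explicit edge_labels dictionary keyed by edge tuples, for
# example to annotate edges with their endpoints:
#
#   >>> G = nx.path_graph(4)
#   >>> pos = nx.spring_layout(G)
#   >>> labels = dict(((u, v), '%d-%d' % (u, v)) for u, v in G.edges())
#   >>> text_items = draw_networkx_edge_labels(G, pos, edge_labels=labels)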
def draw_circular(G, **kwargs):
"""Draw the graph G with a circular layout."""
draw(G,circular_layout(G),**kwargs)
def draw_random(G, **kwargs):
"""Draw the graph G with a random layout."""
draw(G,random_layout(G),**kwargs)
def draw_spectral(G, **kwargs):
"""Draw the graph G with a spectral layout."""
draw(G,spectral_layout(G),**kwargs)
def draw_spring(G, **kwargs):
"""Draw the graph G with a spring layout."""
draw(G,spring_layout(G),**kwargs)
def draw_shell(G, **kwargs):
"""Draw networkx graph with shell layout."""
nlist = kwargs.get('nlist', None)
    if nlist is not None:
        del(kwargs['nlist'])
draw(G,shell_layout(G,nlist=nlist),**kwargs)
def draw_graphviz(G, prog="neato", **kwargs):
"""Draw networkx graph with graphviz layout."""
pos=nx.drawing.graphviz_layout(G,prog)
draw(G,pos,**kwargs)
def draw_nx(G,pos,**kwds):
"""For backward compatibility; use draw or draw_networkx."""
draw(G,pos,**kwds)
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS',warn=False)
import matplotlib.pyplot as plt
except:
raise SkipTest("matplotlib not available")
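# Illustrative sketch (added for clarity, not part of the original module):
# the draw_networkx_* functions can be composed for full control over
# node, edge and label styling, e.g.
#
#   >>> import matplotlib.pyplot as plt
#   >>> G = nx.karate_club_graph()
#   >>> pos = nx.spring_layout(G)
#   >>> nodes = draw_networkx_nodes(G, pos, node_size=50, node_color='b')
#   >>> edges = draw_networkx_edges(G, pos, alpha=0.3)
#   >>> labels = draw_networkx_labels(G, pos, font_size=6)
#   >>> limits = plt.axis('off')
#   >>> plt.savefig('karate.png')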
|
gpl-2.0
|
RachitKansal/scikit-learn
|
examples/missing_values.py
|
233
|
3056
|
"""
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
|
bsd-3-clause
|
jniediek/combinato
|
combinato/plot/spike_heatmap.py
|
1
|
1692
|
# -*- encoding: utf-8 -*-
# JN 2014-12-14
# function to plot heatmaps of clusters
from __future__ import absolute_import, division, print_function
import numpy as np
from matplotlib.pyplot import cm
cmap = cm.Blues
# idea taken from http://stackoverflow.com/a/14779462
cmaplist = [cmap(i) for i in range(int(cmap.N/4), cmap.N)]
# set first color to white
cmaplist[0] = (1, 1, 1, 1)
# set last color to black
cmaplist[-1] = (0, 0, 0, 1)
cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)
spDisplayBorder = 5 # µV additional border in display
def spike_heatmap(ax, spikes, x=None, log=False):
"""
takes spikes, plots heatmap over samples and mean/std line
"""
spMin = spikes.min()
spMax = spikes.max()
spBins = np.linspace(spMin, spMax, int(round(2*spMax)))
if spBins.shape[0] < 3:
spBins = np.linspace(spMin, spMax, 3)
nSamp = spikes.shape[1]
if x is None:
x = range(nSamp)
imdata = np.zeros((len(spBins) - 1, nSamp))
for col in range(nSamp):
data = np.histogram(spikes[:, col], bins=spBins)[0]
if log:
imdata[:, col] = np.log(1 + data)
else:
imdata[:, col] = data
ydiff = (spBins[1] - spBins[0])/2.
extent = [x[0], x[-1], spMin-ydiff, spMax-ydiff]
ax.imshow(imdata,
cmap=cmap,
interpolation='hanning',
aspect='auto',
origin='lower',
extent=extent)
spMean = spikes.mean(0)
spStd = spikes.std(0)
ax.plot(x, spMean, 'k', lw=1)
ax.plot(x, spMean + spStd, color=(.2, .2, .2), lw=1)
ax.plot(x, spMean - spStd, color=(.2, .2, .2), lw=1)
ax.set_xlim((x[0], x[-1]))
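# Illustrative usage (added for clarity, not part of the original module),
# assuming `spikes` is an (n_spikes, n_samples) array of waveforms in µV:
#
#   >>> import numpy as np
#   >>> import matplotlib.pyplot as plt
#   >>> rng = np.random.RandomState(0)
#   >>> spikes = 40 * np.sin(np.linspace(0, 2 * np.pi, 64)) + rng.randn(500, 64)
#   >>> fig, ax = plt.subplots()
#   >>> spike_heatmap(ax, spikes, log=True)
#   >>> fig.savefig('spike_heatmap.png')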
|
mit
|
catchchaos/Movie-Recommender-GA-
|
recommender.py
|
2
|
6637
|
"""
Clustering and stuff
"""
from movielens import *
import sys
import time
import math
import re
import pickle
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
# Store data in arrays
user = []
item = []
rating = []
rating_test = []
# Load the movie lens dataset into arrays
d = Dataset()
d.load_users("data/u.user", user)
d.load_items("data/u.item", item)
d.load_ratings("data/u.base", rating)
d.load_ratings("data/u.test", rating_test)
n_users = len(user)
n_items = len(item)
# The utility matrix stores the rating for each user-item pair in the
# matrix form.
utility = np.zeros((n_users, n_items))
for r in rating:
utility[r.user_id - 1][r.item_id - 1] = r.rating
test = np.zeros((n_users, n_items))
for r in rating_test:
test[r.user_id - 1][r.item_id - 1] = r.rating
avg_ratings = np.asarray(utility).mean(0)
# print avg_ratings
with open("avg_rating.pkl", "w") as fp:
pickle.dump(avg_ratings, fp)
# Perform clustering on items
movie_genre = []
for movie in item:
movie_genre.append([movie.unknown, movie.action, movie.adventure, movie.animation, movie.childrens, movie.comedy,
movie.crime, movie.documentary, movie.drama, movie.fantasy, movie.film_noir, movie.horror,
movie.musical, movie.mystery, movie.romance, movie.sci_fi, movie.thriller, movie.war, movie.western])
movie_genre = np.array(movie_genre)
cluster = KMeans(n_clusters=19)
cluster.fit_predict(movie_genre)
with open("cluster.pkl", "w") as fp:
pickle.dump(cluster, fp)
utility_clustered = []
for i in range(0, n_users):
average = np.zeros(19)
tmp = []
for m in range(0, 19):
tmp.append([])
for j in range(0, n_items):
if utility[i][j] != 0:
tmp[cluster.labels_[j] - 1].append(utility[i][j])
for m in range(0, 19):
if len(tmp[m]) != 0:
average[m] = np.mean(tmp[m])
else:
average[m] = 0
utility_clustered.append(average)
utility_clustered = np.array(utility_clustered)
# Find the average rating for each user and stores it in the user's object
for i in range(0, n_users):
x = utility_clustered[i]
user[i].avg_r = sum(a for a in x if a > 0) / sum(a > 0 for a in x)
# Find the Pearson Correlation Similarity Measure between two users
def pcs(x, y):
num = 0
den1 = 0
den2 = 0
A = utility_clustered[x - 1]
B = utility_clustered[y - 1]
num = sum((a - user[x - 1].avg_r) * (b - user[y - 1].avg_r) for a, b in zip(A, B) if a > 0 and b > 0)
den1 = sum((a - user[x - 1].avg_r) ** 2 for a in A if a > 0)
den2 = sum((b - user[y - 1].avg_r) ** 2 for b in B if b > 0)
den = (den1 ** 0.5) * (den2 ** 0.5)
if den == 0:
return 0
else:
return num / den
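# Illustrative note (added for clarity, not part of the original script):
# pcs() is a Pearson-style correlation that centers each user's ratings on
# their overall average and uses only co-rated genres in the numerator.  On
# a fully co-rated toy pair it behaves like the textbook formula:
#
#   >>> import numpy as np
#   >>> a = np.array([4.0, 3.0, 5.0])
#   >>> b = np.array([2.0, 1.0, 3.0])  # a shifted copy of a
#   >>> round(np.corrcoef(a, b)[0, 1], 6)
#   1.0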
userData=[]
for i in user:
userData.append([i.id, i.sex, i.age, i.occupation, i.zip])
le=LabelEncoder()
le.fit([a[3] for a in userData])
le2=LabelEncoder()
le2.fit([a[1] for a in userData])
le3=LabelEncoder()
le3.fit([a[4] for a in userData])
userData=[]
for i in user:
userData.append([i.id, int(le2.transform([i.sex])), i.age, int(le.transform([i.occupation])), int(le3.transform([i.zip]))])
userCluster = KMeans(n_clusters=4)
userCluster.fit_predict(userData)
for i in range(n_users):
userCluster.labels_[i]
#print len(userCluster.labels_)
pcs_matrix = np.zeros((n_users, n_users))
s=time.time()
for i in range(0, n_users):
for j in range(0, n_users):
if userCluster.labels_[i]!=userCluster.labels_[j]:
pcs_matrix[i][j]=0
continue
if i!=j:
pcs_matrix[i][j] = pcs(i + 1, j + 1)
sys.stdout.write("\rGenerating Similarity Matrix [%d:%d] = %f" % (i+1, j+1, pcs_matrix[i][j]))
sys.stdout.flush()
time.sleep(0.00005)
print "\rGenerating Similarity Matrix [%d:%d] = %f" % (i+1, j+1, pcs_matrix[i][j])
print time.time()-s
"""
pcs_matrix = np.zeros((n_users, n_users))
s=time.time()
for i in range(0, n_users):
for j in range(0, n_users):
if i!=j:
pcs_matrix[i][j] = pcs(i + 1, j + 1)
sys.stdout.write("\rGenerating Similarity Matrix [%d:%d] = %f" % (i+1, j+1, pcs_matrix[i][j]))
sys.stdout.flush()
time.sleep(0.00005)
print "\rGenerating Similarity Matrix [%d:%d] = %f" % (i+1, j+1, pcs_matrix[i][j])
print time.time()-s
#print pcs_matrix
"""
# Guesses the ratings that user with id, user_id, might give to item with id, i_id.
# We will consider the top_n similar users to do this.
def norm():
normalize = np.zeros((n_users, 19))
for i in range(0, n_users):
for j in range(0, 19):
if utility_clustered[i][j] != 0:
normalize[i][j] = utility_clustered[i][j] - user[i].avg_r
else:
normalize[i][j] = float('Inf')
return normalize
def guess(user_id, i_id, top_n):
similarity = []
for i in range(0, n_users):
if i+1 != user_id:
similarity.append(pcs_matrix[user_id-1][i])
temp = norm()
temp = np.delete(temp, user_id-1, 0)
top = [x for (y,x) in sorted(zip(similarity,temp), key=lambda pair: pair[0], reverse=True)]
s = 0
c = 0
for i in range(0, top_n):
if top[i][i_id-1] != float('Inf'):
s += top[i][i_id-1]
c += 1
g = user[user_id-1].avg_r if c == 0 else s/float(c) + user[user_id-1].avg_r
if g < 1.0:
return 1.0
elif g > 5.0:
return 5.0
else:
return g
utility_copy = np.copy(utility_clustered)
for i in range(0, n_users):
for j in range(0, 19):
if utility_copy[i][j] == 0:
sys.stdout.write("\rGuessing [User:Rating] = [%d:%d]" % (i, j))
sys.stdout.flush()
time.sleep(0.0001)
utility_copy[i][j] = guess(i+1, j+1, 150)
print "\rGuessing [User:Rating] = [%d:%d]" % (i, j)
print utility_copy
# Utility matrix is an n_users x n_movie_clusters(hybrid genres) matrix where utility_matrix[i][j] = average rating of user i to hybrid genre j
pickle.dump( utility_copy, open("utility_matrix.pkl", "wb"))
# Predict ratings for u.test and find the mean squared error
y_true = []
y_pred = []
f = open('test.txt', 'w')
for i in range(0, n_users):
for j in range(0, n_items):
if test[i][j] > 0:
f.write("%d, %d, %.4f\n" % (i+1, j+1, utility_copy[i][cluster.labels_[j]-1]))
y_true.append(test[i][j])
y_pred.append(utility_copy[i][cluster.labels_[j]-1])
f.close()
print "Mean Squared Error: %f" % mean_squared_error(y_true, y_pred)
|
mit
|
chetan51/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/transforms.py
|
69
|
75638
|
"""
matplotlib includes a framework for arbitrary geometric
transformations that is used to determine the final position of all
elements drawn on the canvas.
Transforms are composed into trees of :class:`TransformNode` objects
whose actual value depends on their children. When the contents of
children change, their parents are automatically invalidated. The
next time an invalidated transform is accessed, it is recomputed to
reflect those changes. This invalidation/caching approach prevents
unnecessary recomputations of transforms, and contributes to better
interactive performance.
For example, here is a graph of the transform tree used to plot data
to the graph:
.. image:: ../_static/transforms.png
The framework can be used for both affine and non-affine
transformations. However, for speed, we want to use the backend
renderers to perform affine transformations whenever possible.
Therefore, it is possible to perform just the affine or non-affine
part of a transformation on a set of data. The affine is always
assumed to occur after the non-affine. For any transform::
full transform == non-affine part + affine part
The backends are not expected to handle non-affine transformations
themselves.
"""
import numpy as np
from numpy import ma
from matplotlib._path import affine_transform
from numpy.linalg import inv
from weakref import WeakKeyDictionary
import warnings
try:
set
except NameError:
from sets import Set as set
import cbook
from path import Path
from _path import count_bboxes_overlapping_bbox, update_path_extents
DEBUG = False
if DEBUG:
import warnings
MaskedArray = ma.MaskedArray
class TransformNode(object):
"""
:class:`TransformNode` is the base class for anything that
participates in the transform tree and needs to invalidate its
parents or be invalidated. This includes classes that are not
really transforms, such as bounding boxes, since some transforms
depend on bounding boxes to compute their values.
"""
_gid = 0
# Invalidation may affect only the affine part. If the
# invalidation was "affine-only", the _invalid member is set to
# INVALID_AFFINE_ONLY
INVALID_NON_AFFINE = 1
INVALID_AFFINE = 2
INVALID = INVALID_NON_AFFINE | INVALID_AFFINE
# Some metadata about the transform, used to determine whether an
# invalidation is affine-only
is_affine = False
is_bbox = False
# If pass_through is True, all ancestors will always be
# invalidated, even if 'self' is already invalid.
pass_through = False
def __init__(self):
"""
Creates a new :class:`TransformNode`.
"""
# Parents are stored in a WeakKeyDictionary, so that if the
# parents are deleted, references from the children won't keep
# them alive.
self._parents = WeakKeyDictionary()
# TransformNodes start out as invalid until their values are
# computed for the first time.
self._invalid = 1
def __copy__(self, *args):
raise NotImplementedError(
"TransformNode instances can not be copied. " +
"Consider using frozen() instead.")
__deepcopy__ = __copy__
def invalidate(self):
"""
Invalidate this :class:`TransformNode` and all of its
ancestors. Should be called any time the transform changes.
"""
# If we are an affine transform being changed, we can set the
# flag to INVALID_AFFINE_ONLY
value = (self.is_affine) and self.INVALID_AFFINE or self.INVALID
# Shortcut: If self is already invalid, that means its parents
# are as well, so we don't need to do anything.
if self._invalid == value:
return
if not len(self._parents):
self._invalid = value
return
# Invalidate all ancestors of self using pseudo-recursion.
stack = [self]
while len(stack):
root = stack.pop()
# Stop at subtrees that have already been invalidated
if root._invalid != value or root.pass_through:
root._invalid = self.INVALID
stack.extend(root._parents.keys())
def set_children(self, *children):
"""
Set the children of the transform, to let the invalidation
system know which transforms can invalidate this transform.
Should be called from the constructor of any transforms that
depend on other transforms.
"""
for child in children:
child._parents[self] = None
if DEBUG:
_set_children = set_children
def set_children(self, *children):
self._set_children(*children)
self._children = children
set_children.__doc__ = _set_children.__doc__
def frozen(self):
"""
Returns a frozen copy of this transform node. The frozen copy
will not update when its children change. Useful for storing
a previously known state of a transform where
``copy.deepcopy()`` might normally be used.
"""
return self
if DEBUG:
def write_graphviz(self, fobj, highlight=[]):
"""
For debugging purposes.
Writes the transform tree rooted at 'self' to a graphviz "dot"
format file. This file can be run through the "dot" utility
to produce a graph of the transform tree.
Affine transforms are marked in blue. Bounding boxes are
marked in yellow.
*fobj*: A Python file-like object
"""
seen = set()
def recurse(root):
if root in seen:
return
seen.add(root)
props = {}
label = root.__class__.__name__
if root._invalid:
label = '[%s]' % label
if root in highlight:
props['style'] = 'bold'
props['shape'] = 'box'
props['label'] = '"%s"' % label
props = ' '.join(['%s=%s' % (key, val) for key, val in props.items()])
fobj.write('%s [%s];\n' %
(hash(root), props))
if hasattr(root, '_children'):
for child in root._children:
name = '?'
for key, val in root.__dict__.items():
if val is child:
name = key
break
fobj.write('%s -> %s [label="%s", fontsize=10];\n' % (
hash(root),
hash(child),
name))
recurse(child)
fobj.write("digraph G {\n")
recurse(self)
fobj.write("}\n")
else:
def write_graphviz(self, fobj, highlight=[]):
return
class BboxBase(TransformNode):
"""
This is the base class of all bounding boxes, and provides
read-only access to its data. A mutable bounding box is provided
by the :class:`Bbox` class.
The canonical representation is as two points, with no
restrictions on their ordering. Convenience properties are
provided to get the left, bottom, right and top edges and width
    and height, but these are not stored explicitly.
"""
is_bbox = True
is_affine = True
#* Redundant: Removed for performance
#
# def __init__(self):
# TransformNode.__init__(self)
if DEBUG:
def _check(points):
if ma.isMaskedArray(points):
warnings.warn("Bbox bounds are a masked array.")
points = np.asarray(points)
if (points[1,0] - points[0,0] == 0 or
points[1,1] - points[0,1] == 0):
warnings.warn("Singular Bbox.")
_check = staticmethod(_check)
def frozen(self):
return Bbox(self.get_points().copy())
frozen.__doc__ = TransformNode.__doc__
def __array__(self, *args, **kwargs):
return self.get_points()
def is_unit(self):
"""
Returns True if the :class:`Bbox` is the unit bounding box
from (0, 0) to (1, 1).
"""
return list(self.get_points().flatten()) == [0., 0., 1., 1.]
def _get_x0(self):
return self.get_points()[0, 0]
x0 = property(_get_x0, None, None, """
(property) :attr:`x0` is the first of the pair of *x* coordinates that
define the bounding box. :attr:`x0` is not guaranteed to be
less than :attr:`x1`. If you require that, use :attr:`xmin`.""")
def _get_y0(self):
return self.get_points()[0, 1]
y0 = property(_get_y0, None, None, """
(property) :attr:`y0` is the first of the pair of *y* coordinates that
define the bounding box. :attr:`y0` is not guaranteed to be
less than :attr:`y1`. If you require that, use :attr:`ymin`.""")
def _get_x1(self):
return self.get_points()[1, 0]
x1 = property(_get_x1, None, None, """
(property) :attr:`x1` is the second of the pair of *x* coordinates that
define the bounding box. :attr:`x1` is not guaranteed to be
greater than :attr:`x0`. If you require that, use :attr:`xmax`.""")
def _get_y1(self):
return self.get_points()[1, 1]
y1 = property(_get_y1, None, None, """
(property) :attr:`y1` is the second of the pair of *y* coordinates that
define the bounding box. :attr:`y1` is not guaranteed to be
greater than :attr:`y0`. If you require that, use :attr:`ymax`.""")
def _get_p0(self):
return self.get_points()[0]
p0 = property(_get_p0, None, None, """
(property) :attr:`p0` is the first pair of (*x*, *y*) coordinates that
define the bounding box. It is not guaranteed to be the bottom-left
corner. For that, use :attr:`min`.""")
def _get_p1(self):
return self.get_points()[1]
p1 = property(_get_p1, None, None, """
(property) :attr:`p1` is the second pair of (*x*, *y*) coordinates that
define the bounding box. It is not guaranteed to be the top-right
corner. For that, use :attr:`max`.""")
def _get_xmin(self):
return min(self.get_points()[:, 0])
xmin = property(_get_xmin, None, None, """
(property) :attr:`xmin` is the left edge of the bounding box.""")
def _get_ymin(self):
return min(self.get_points()[:, 1])
ymin = property(_get_ymin, None, None, """
(property) :attr:`ymin` is the bottom edge of the bounding box.""")
def _get_xmax(self):
return max(self.get_points()[:, 0])
xmax = property(_get_xmax, None, None, """
(property) :attr:`xmax` is the right edge of the bounding box.""")
def _get_ymax(self):
return max(self.get_points()[:, 1])
ymax = property(_get_ymax, None, None, """
(property) :attr:`ymax` is the top edge of the bounding box.""")
def _get_min(self):
return [min(self.get_points()[:, 0]),
min(self.get_points()[:, 1])]
min = property(_get_min, None, None, """
(property) :attr:`min` is the bottom-left corner of the bounding
box.""")
def _get_max(self):
return [max(self.get_points()[:, 0]),
max(self.get_points()[:, 1])]
max = property(_get_max, None, None, """
(property) :attr:`max` is the top-right corner of the bounding box.""")
def _get_intervalx(self):
return self.get_points()[:, 0]
intervalx = property(_get_intervalx, None, None, """
(property) :attr:`intervalx` is the pair of *x* coordinates that define
the bounding box. It is not guaranteed to be sorted from left to
right.""")
def _get_intervaly(self):
return self.get_points()[:, 1]
intervaly = property(_get_intervaly, None, None, """
(property) :attr:`intervaly` is the pair of *y* coordinates that define
the bounding box. It is not guaranteed to be sorted from bottom to
top.""")
def _get_width(self):
points = self.get_points()
return points[1, 0] - points[0, 0]
width = property(_get_width, None, None, """
(property) The width of the bounding box. It may be negative if
:attr:`x1` < :attr:`x0`.""")
def _get_height(self):
points = self.get_points()
return points[1, 1] - points[0, 1]
height = property(_get_height, None, None, """
(property) The height of the bounding box. It may be negative if
:attr:`y1` < :attr:`y0`.""")
def _get_size(self):
points = self.get_points()
return points[1] - points[0]
size = property(_get_size, None, None, """
(property) The width and height of the bounding box. May be negative,
in the same way as :attr:`width` and :attr:`height`.""")
def _get_bounds(self):
x0, y0, x1, y1 = self.get_points().flatten()
return (x0, y0, x1 - x0, y1 - y0)
bounds = property(_get_bounds, None, None, """
(property) Returns (:attr:`x0`, :attr:`y0`, :attr:`width`,
:attr:`height`).""")
def _get_extents(self):
return self.get_points().flatten().copy()
extents = property(_get_extents, None, None, """
(property) Returns (:attr:`x0`, :attr:`y0`, :attr:`x1`, :attr:`y1`).""")
def get_points(self):
        raise NotImplementedError()
def containsx(self, x):
"""
Returns True if *x* is between or equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x >= x0 and x <= x1))
or (x >= x1 and x <= x0))
def containsy(self, y):
"""
Returns True if *y* is between or equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
and (y >= y0 and y <= y1))
or (y >= y1 and y <= y0))
def contains(self, x, y):
"""
Returns *True* if (*x*, *y*) is a coordinate inside the
bounding box or on its edge.
"""
return self.containsx(x) and self.containsy(y)
def overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 < ax1) or
(by2 < ay1) or
(bx1 > ax2) or
(by1 > ay2))
def fully_containsx(self, x):
"""
Returns True if *x* is between but not equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x > x0 and x < x1))
or (x > x1 and x < x0))
def fully_containsy(self, y):
"""
Returns True if *y* is between but not equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
                 and (y > y0 and y < y1))
                or (y > y1 and y < y0))
def fully_contains(self, x, y):
"""
Returns True if (*x*, *y*) is a coordinate inside the bounding
box, but not on its edge.
"""
return self.fully_containsx(x) \
and self.fully_containsy(y)
def fully_overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*, but not on its edge alone.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 <= ax1) or
(by2 <= ay1) or
(bx1 >= ax2) or
(by1 >= ay2))
def transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the given transform.
"""
return Bbox(transform.transform(self.get_points()))
def inverse_transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the inverse of the given transform.
"""
return Bbox(transform.inverted().transform(self.get_points()))
coefs = {'C': (0.5, 0.5),
'SW': (0,0),
'S': (0.5, 0),
'SE': (1.0, 0),
'E': (1.0, 0.5),
'NE': (1.0, 1.0),
'N': (0.5, 1.0),
'NW': (0, 1.0),
'W': (0, 0.5)}
def anchored(self, c, container = None):
"""
Return a copy of the :class:`Bbox`, shifted to position *c*
within a container.
*c*: may be either:
* a sequence (*cx*, *cy*) where *cx* and *cy* range from 0
to 1, where 0 is left or bottom and 1 is right or top
* a string:
- 'C' for centered
- 'S' for bottom-center
            - 'SE' for bottom-right
            - 'E' for right
- etc.
Optional argument *container* is the box within which the
:class:`Bbox` is positioned; it defaults to the initial
:class:`Bbox`.
"""
if container is None:
container = self
l, b, w, h = container.bounds
if isinstance(c, str):
cx, cy = self.coefs[c]
else:
cx, cy = c
L, B, W, H = self.bounds
return Bbox(self._points +
[(l + cx * (w-W)) - L,
(b + cy * (h-H)) - B])
def shrunk(self, mx, my):
"""
Return a copy of the :class:`Bbox`, shrunk by the factor *mx*
in the *x* direction and the factor *my* in the *y* direction.
The lower left corner of the box remains unchanged. Normally
*mx* and *my* will be less than 1, but this is not enforced.
"""
w, h = self.size
return Bbox([self._points[0],
self._points[0] + [mx * w, my * h]])
def shrunk_to_aspect(self, box_aspect, container = None, fig_aspect = 1.0):
"""
Return a copy of the :class:`Bbox`, shrunk so that it is as
large as it can be while having the desired aspect ratio,
*box_aspect*. If the box coordinates are relative---that
is, fractions of a larger box such as a figure---then the
physical aspect ratio of that figure is specified with
*fig_aspect*, so that *box_aspect* can also be given as a
ratio of the absolute dimensions, not the relative dimensions.
"""
assert box_aspect > 0 and fig_aspect > 0
if container is None:
container = self
w, h = container.size
H = w * box_aspect/fig_aspect
if H <= h:
W = w
else:
W = h * fig_aspect/box_aspect
H = h
return Bbox([self._points[0],
self._points[0] + (W, H)])
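    # Worked example (added for illustration, not part of the original
    # docstring) of the formula above: for a unit container, box_aspect=1.0
    # and fig_aspect=2.0 give H = 1 * 1.0 / 2.0 = 0.5 <= h, so the width is
    # kept and only the height shrinks:
    #
    #   >>> b = Bbox.from_bounds(0., 0., 1., 1.)
    #   >>> b.shrunk_to_aspect(1.0, fig_aspect=2.0).bounds   # -> (0.0, 0.0, 1.0, 0.5)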
def splitx(self, *args):
"""
e.g., ``bbox.splitx(f1, f2, ...)``
Returns a list of new :class:`Bbox` objects formed by
splitting the original one with vertical lines at fractional
positions *f1*, *f2*, ...
"""
boxes = []
xf = [0] + list(args) + [1]
x0, y0, x1, y1 = self._get_extents()
w = x1 - x0
for xf0, xf1 in zip(xf[:-1], xf[1:]):
boxes.append(Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]]))
return boxes
def splity(self, *args):
"""
        e.g., ``bbox.splity(f1, f2, ...)``
Returns a list of new :class:`Bbox` objects formed by
splitting the original one with horizontal lines at fractional
positions *f1*, *f2*, ...
"""
boxes = []
yf = [0] + list(args) + [1]
x0, y0, x1, y1 = self._get_extents()
h = y1 - y0
for yf0, yf1 in zip(yf[:-1], yf[1:]):
boxes.append(Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]]))
return boxes
def count_contains(self, vertices):
"""
Count the number of vertices contained in the :class:`Bbox`.
*vertices* is a Nx2 Numpy array.
"""
if len(vertices) == 0:
return 0
vertices = np.asarray(vertices)
x0, y0, x1, y1 = self._get_extents()
dx0 = np.sign(vertices[:, 0] - x0)
dy0 = np.sign(vertices[:, 1] - y0)
dx1 = np.sign(vertices[:, 0] - x1)
dy1 = np.sign(vertices[:, 1] - y1)
inside = (abs(dx0 + dx1) + abs(dy0 + dy1)) <= 2
return np.sum(inside)
def count_overlaps(self, bboxes):
"""
Count the number of bounding boxes that overlap this one.
bboxes is a sequence of :class:`BboxBase` objects
"""
return count_bboxes_overlapping_bbox(self, bboxes)
def expanded(self, sw, sh):
"""
Return a new :class:`Bbox` which is this :class:`Bbox`
expanded around its center by the given factors *sw* and
*sh*.
"""
width = self.width
height = self.height
deltaw = (sw * width - width) / 2.0
deltah = (sh * height - height) / 2.0
a = np.array([[-deltaw, -deltah], [deltaw, deltah]])
return Bbox(self._points + a)
def padded(self, p):
"""
Return a new :class:`Bbox` that is padded on all four sides by
the given value.
"""
points = self._points
return Bbox(points + [[-p, -p], [p, p]])
def translated(self, tx, ty):
"""
Return a copy of the :class:`Bbox`, statically translated by
*tx* and *ty*.
"""
return Bbox(self._points + (tx, ty))
def corners(self):
"""
Return an array of points which are the four corners of this
rectangle. For example, if this :class:`Bbox` is defined by
the points (*a*, *b*) and (*c*, *d*), :meth:`corners` returns
(*a*, *b*), (*a*, *d*), (*c*, *b*) and (*c*, *d*).
"""
l, b, r, t = self.get_points().flatten()
return np.array([[l, b], [l, t], [r, b], [r, t]])
def rotated(self, radians):
"""
Return a new bounding box that bounds a rotated version of
this bounding box by the given radians. The new bounding box
is still aligned with the axes, of course.
"""
corners = self.corners()
corners_rotated = Affine2D().rotate(radians).transform(corners)
bbox = Bbox.unit()
bbox.update_from_data_xy(corners_rotated, ignore=True)
return bbox
#@staticmethod
def union(bboxes):
"""
Return a :class:`Bbox` that contains all of the given bboxes.
"""
assert(len(bboxes))
if len(bboxes) == 1:
return bboxes[0]
x0 = np.inf
y0 = np.inf
x1 = -np.inf
y1 = -np.inf
for bbox in bboxes:
points = bbox.get_points()
xs = points[:, 0]
ys = points[:, 1]
x0 = min(x0, np.min(xs))
y0 = min(y0, np.min(ys))
x1 = max(x1, np.max(xs))
y1 = max(y1, np.max(ys))
return Bbox.from_extents(x0, y0, x1, y1)
union = staticmethod(union)
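    # Illustrative usage (added for clarity, not part of the original
    # docstring): the union of two overlapping boxes is the smallest box
    # containing both.
    #
    #   >>> b = Bbox.union([Bbox.from_extents(0., 0., 1., 1.),
    #   ...                 Bbox.from_extents(0.5, 0.5, 3., 2.)])
    #   >>> b.extents   # -> [0., 0., 3., 2.]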
class Bbox(BboxBase):
"""
A mutable bounding box.
"""
def __init__(self, points):
"""
*points*: a 2x2 numpy array of the form [[x0, y0], [x1, y1]]
If you need to create a :class:`Bbox` object from another form
of data, consider the static methods :meth:`unit`,
:meth:`from_bounds` and :meth:`from_extents`.
"""
BboxBase.__init__(self)
self._points = np.asarray(points, np.float_)
self._minpos = np.array([0.0000001, 0.0000001])
self._ignore = True
if DEBUG:
___init__ = __init__
def __init__(self, points):
self._check(points)
self.___init__(points)
def invalidate(self):
self._check(self._points)
TransformNode.invalidate(self)
_unit_values = np.array([[0.0, 0.0], [1.0, 1.0]], np.float_)
#@staticmethod
def unit():
"""
(staticmethod) Create a new unit :class:`Bbox` from (0, 0) to
(1, 1).
"""
return Bbox(Bbox._unit_values.copy())
unit = staticmethod(unit)
#@staticmethod
def from_bounds(x0, y0, width, height):
"""
(staticmethod) Create a new :class:`Bbox` from *x0*, *y0*,
*width* and *height*.
*width* and *height* may be negative.
"""
return Bbox.from_extents(x0, y0, x0 + width, y0 + height)
from_bounds = staticmethod(from_bounds)
#@staticmethod
def from_extents(*args):
"""
(staticmethod) Create a new Bbox from *left*, *bottom*,
*right* and *top*.
The *y*-axis increases upwards.
"""
points = np.array(args, dtype=np.float_).reshape(2, 2)
return Bbox(points)
from_extents = staticmethod(from_extents)
def __repr__(self):
return 'Bbox(%s)' % repr(self._points)
__str__ = __repr__
def ignore(self, value):
"""
Set whether the existing bounds of the box should be ignored
by subsequent calls to :meth:`update_from_data` or
:meth:`update_from_data_xy`.
*value*:
- When True, subsequent calls to :meth:`update_from_data`
will ignore the existing bounds of the :class:`Bbox`.
- When False, subsequent calls to :meth:`update_from_data`
will include the existing bounds of the :class:`Bbox`.
"""
self._ignore = value
def update_from_data(self, x, y, ignore=None):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*x*: a numpy array of *x*-values
*y*: a numpy array of *y*-values
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
"""
warnings.warn(
"update_from_data requires a memory copy -- please replace with update_from_data_xy")
xy = np.hstack((x.reshape((len(x), 1)), y.reshape((len(y), 1))))
return self.update_from_data_xy(xy, ignore)
def update_from_path(self, path, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*path*: a :class:`~matplotlib.path.Path` instance
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
*updatex*: when True, update the x values
*updatey*: when True, update the y values
"""
if ignore is None:
ignore = self._ignore
if path.vertices.size == 0:
return
points, minpos, changed = update_path_extents(
path, None, self._points, self._minpos, ignore)
if changed:
self.invalidate()
if updatex:
self._points[:,0] = points[:,0]
self._minpos[0] = minpos[0]
if updatey:
self._points[:,1] = points[:,1]
self._minpos[1] = minpos[1]
def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*xy*: a numpy array of 2D points
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
*updatex*: when True, update the x values
*updatey*: when True, update the y values
"""
if len(xy) == 0:
return
path = Path(xy)
self.update_from_path(path, ignore=ignore,
updatex=updatex, updatey=updatey)
def _set_x0(self, val):
self._points[0, 0] = val
self.invalidate()
x0 = property(BboxBase._get_x0, _set_x0)
def _set_y0(self, val):
self._points[0, 1] = val
self.invalidate()
y0 = property(BboxBase._get_y0, _set_y0)
def _set_x1(self, val):
self._points[1, 0] = val
self.invalidate()
x1 = property(BboxBase._get_x1, _set_x1)
def _set_y1(self, val):
self._points[1, 1] = val
self.invalidate()
y1 = property(BboxBase._get_y1, _set_y1)
def _set_p0(self, val):
self._points[0] = val
self.invalidate()
p0 = property(BboxBase._get_p0, _set_p0)
def _set_p1(self, val):
self._points[1] = val
self.invalidate()
p1 = property(BboxBase._get_p1, _set_p1)
def _set_intervalx(self, interval):
self._points[:, 0] = interval
self.invalidate()
intervalx = property(BboxBase._get_intervalx, _set_intervalx)
def _set_intervaly(self, interval):
self._points[:, 1] = interval
self.invalidate()
intervaly = property(BboxBase._get_intervaly, _set_intervaly)
def _set_bounds(self, bounds):
l, b, w, h = bounds
points = np.array([[l, b], [l+w, b+h]], np.float_)
if np.any(self._points != points):
self._points = points
self.invalidate()
bounds = property(BboxBase._get_bounds, _set_bounds)
def _get_minpos(self):
return self._minpos
minpos = property(_get_minpos)
def _get_minposx(self):
return self._minpos[0]
minposx = property(_get_minposx)
def _get_minposy(self):
return self._minpos[1]
minposy = property(_get_minposy)
def get_points(self):
"""
Get the points of the bounding box directly as a numpy array
of the form: [[x0, y0], [x1, y1]].
"""
self._invalid = 0
return self._points
def set_points(self, points):
"""
Set the points of the bounding box directly from a numpy array
of the form: [[x0, y0], [x1, y1]]. No error checking is
performed, as this method is mainly for internal use.
"""
if np.any(self._points != points):
self._points = points
self.invalidate()
def set(self, other):
"""
Set this bounding box from the "frozen" bounds of another
:class:`Bbox`.
"""
if np.any(self._points != other.get_points()):
self._points = other.get_points()
self.invalidate()
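# --- Illustrative sketch (not part of the original module) ------------------
# The docstrings above describe the Bbox constructors together with the
# ignore()/update_from_data_xy() semantics; this hedged sketch shows how they
# are expected to fit together, using only names defined in this module plus
# numpy.
def _example_bbox_usage():
    """Minimal sketch of building, updating and querying a Bbox."""
    pts = np.array([[2.0, 3.0], [5.0, -1.0], [4.0, 7.0]])
    bbox = Bbox.unit()
    # Replace the unit bounds entirely with the extents of the data ...
    bbox.update_from_data_xy(pts, ignore=True)    # bounds become (2, -1)-(5, 7)
    # ... then grow the box so it also covers an additional point.
    bbox.update_from_data_xy(np.array([[10.0, 0.0]]), ignore=False)
    # Fractional splits and containment queries come from BboxBase.
    left, right = bbox.splitx(0.5)
    n_inside = bbox.count_contains(pts)
    return bbox, left, right, n_inside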
class TransformedBbox(BboxBase):
"""
A :class:`Bbox` that is automatically transformed by a given
transform. When either the child bounding box or transform
changes, the bounds of this bbox will update accordingly.
"""
def __init__(self, bbox, transform):
"""
*bbox*: a child :class:`Bbox`
*transform*: a 2D :class:`Transform`
"""
assert bbox.is_bbox
assert isinstance(transform, Transform)
assert transform.input_dims == 2
assert transform.output_dims == 2
BboxBase.__init__(self)
self._bbox = bbox
self._transform = transform
self.set_children(bbox, transform)
self._points = None
def __repr__(self):
return "TransformedBbox(%s, %s)" % (self._bbox, self._transform)
__str__ = __repr__
def get_points(self):
if self._invalid:
points = self._transform.transform(self._bbox.get_points())
if ma.isMaskedArray(points):
points.putmask(0.0)
points = np.asarray(points)
self._points = points
self._invalid = 0
return self._points
get_points.__doc__ = Bbox.get_points.__doc__
if DEBUG:
_get_points = get_points
def get_points(self):
points = self._get_points()
self._check(points)
return points
class Transform(TransformNode):
"""
The base class of all :class:`TransformNode` instances that
actually perform a transformation.
All non-affine transformations should be subclasses of this class.
New affine transformations should be subclasses of
:class:`Affine2D`.
Subclasses of this class should override the following members (at
minimum):
- :attr:`input_dims`
- :attr:`output_dims`
- :meth:`transform`
- :attr:`is_separable`
- :attr:`has_inverse`
- :meth:`inverted` (if :meth:`has_inverse` can return True)
If the transform needs to do something non-standard with
    :class:`matplotlib.path.Path` objects, such as adding curves
where there were once line segments, it should override:
- :meth:`transform_path`
"""
# The number of input and output dimensions for this transform.
# These must be overridden (with integers) in the subclass.
input_dims = None
output_dims = None
    # True if this transform has a corresponding inverse transform.
has_inverse = False
# True if this transform is separable in the x- and y- dimensions.
is_separable = False
#* Redundant: Removed for performance
#
# def __init__(self):
# TransformNode.__init__(self)
def __add__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(self, other)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __radd__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(other, self)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __array__(self, *args, **kwargs):
"""
Used by C/C++ -based backends to get at the array matrix data.
"""
return self.frozen().__array__()
def transform(self, values):
"""
Performs the transformation on the given array of values.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
raise NotImplementedError()
def transform_affine(self, values):
"""
Performs only the affine part of this transformation on the
given array of values.
``transform(values)`` is always equivalent to
``transform_affine(transform_non_affine(values))``.
In non-affine transformations, this is generally a no-op. In
affine transformations, this is equivalent to
``transform(values)``.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
return values
def transform_non_affine(self, values):
"""
Performs only the non-affine part of the transformation.
``transform(values)`` is always equivalent to
``transform_affine(transform_non_affine(values))``.
In non-affine transformations, this is generally equivalent to
``transform(values)``. In affine transformations, this is
always a no-op.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
return self.transform(values)
def get_affine(self):
"""
Get the affine part of this transform.
"""
return IdentityTransform()
def transform_point(self, point):
"""
A convenience function that returns the transformed copy of a
single point.
The point is given as a sequence of length :attr:`input_dims`.
The transformed point is returned as a sequence of length
:attr:`output_dims`.
"""
assert len(point) == self.input_dims
return self.transform(np.asarray([point]))[0]
def transform_path(self, path):
"""
Returns a transformed copy of path.
*path*: a :class:`~matplotlib.path.Path` instance.
In some cases, this transform may insert curves into the path
that began as line segments.
"""
return Path(self.transform(path.vertices), path.codes)
def transform_path_affine(self, path):
"""
Returns a copy of path, transformed only by the affine part of
this transform.
*path*: a :class:`~matplotlib.path.Path` instance.
``transform_path(path)`` is equivalent to
        ``transform_path_affine(transform_path_non_affine(path))``.
"""
return path
def transform_path_non_affine(self, path):
"""
Returns a copy of path, transformed only by the non-affine
part of this transform.
*path*: a :class:`~matplotlib.path.Path` instance.
``transform_path(path)`` is equivalent to
        ``transform_path_affine(transform_path_non_affine(path))``.
"""
return Path(self.transform_non_affine(path.vertices), path.codes)
def transform_angles(self, angles, pts, radians=False, pushoff=1e-5):
"""
Performs transformation on a set of angles anchored at
specific locations.
The *angles* must be a column vector (i.e., numpy array).
The *pts* must be a two-column numpy array of x,y positions
(angle transforms currently only work in 2D). This array must
have the same number of rows as *angles*.
*radians* indicates whether or not input angles are given in
radians (True) or degrees (False; the default).
*pushoff* is the distance to move away from *pts* for
determining transformed angles (see discussion of method
below).
The transformed angles are returned in an array with the same
size as *angles*.
The generic version of this method uses a very generic
algorithm that transforms *pts*, as well as locations very
close to *pts*, to find the angle in the transformed system.
"""
# Must be 2D
        if self.input_dims != 2 or self.output_dims != 2:
raise NotImplementedError('Only defined in 2D')
# pts must be array with 2 columns for x,y
assert pts.shape[1] == 2
# angles must be a column vector and have same number of
# rows as pts
assert np.prod(angles.shape) == angles.shape[0] == pts.shape[0]
# Convert to radians if desired
if not radians:
angles = angles / 180.0 * np.pi
# Move a short distance away
pts2 = pts + pushoff * np.c_[ np.cos(angles), np.sin(angles) ]
# Transform both sets of points
tpts = self.transform( pts )
tpts2 = self.transform( pts2 )
# Calculate transformed angles
d = tpts2 - tpts
a = np.arctan2( d[:,1], d[:,0] )
# Convert back to degrees if desired
if not radians:
a = a * 180.0 / np.pi
return a
def inverted(self):
"""
Return the corresponding inverse transformation.
The return value of this method should be treated as
temporary. An update to *self* does not cause a corresponding
update to its inverted copy.
``x === self.inverted().transform(self.transform(x))``
"""
raise NotImplementedError()
class TransformWrapper(Transform):
"""
A helper class that holds a single child transform and acts
equivalently to it.
This is useful if a node of the transform tree must be replaced at
run time with a transform of a different type. This class allows
that replacement to correctly trigger invalidation.
Note that :class:`TransformWrapper` instances must have the same
input and output dimensions during their entire lifetime, so the
child transform may only be replaced with another child transform
of the same dimensions.
"""
pass_through = True
is_affine = False
def __init__(self, child):
"""
*child*: A class:`Transform` instance. This child may later
be replaced with :meth:`set`.
"""
assert isinstance(child, Transform)
Transform.__init__(self)
self.input_dims = child.input_dims
self.output_dims = child.output_dims
self._set(child)
self._invalid = 0
def __repr__(self):
return "TransformWrapper(%r)" % self._child
__str__ = __repr__
def frozen(self):
return self._child.frozen()
frozen.__doc__ = Transform.frozen.__doc__
def _set(self, child):
self._child = child
self.set_children(child)
self.transform = child.transform
self.transform_affine = child.transform_affine
self.transform_non_affine = child.transform_non_affine
self.transform_path = child.transform_path
self.transform_path_affine = child.transform_path_affine
self.transform_path_non_affine = child.transform_path_non_affine
self.get_affine = child.get_affine
self.inverted = child.inverted
def set(self, child):
"""
Replace the current child of this transform with another one.
The new child must have the same number of input and output
dimensions as the current child.
"""
assert child.input_dims == self.input_dims
assert child.output_dims == self.output_dims
self._set(child)
self._invalid = 0
self.invalidate()
self._invalid = 0
def _get_is_separable(self):
return self._child.is_separable
is_separable = property(_get_is_separable)
def _get_has_inverse(self):
return self._child.has_inverse
has_inverse = property(_get_has_inverse)
class AffineBase(Transform):
"""
The base class of all affine transformations of any number of
dimensions.
"""
is_affine = True
def __init__(self):
Transform.__init__(self)
self._inverted = None
def __array__(self, *args, **kwargs):
return self.get_matrix()
#@staticmethod
def _concat(a, b):
"""
Concatenates two transformation matrices (represented as numpy
arrays) together.
"""
return np.dot(b, a)
_concat = staticmethod(_concat)
def get_matrix(self):
"""
Get the underlying transformation matrix as a numpy array.
"""
raise NotImplementedError()
def transform_non_affine(self, points):
return points
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_affine(self, path):
return self.transform_path(path)
transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__
def transform_path_non_affine(self, path):
return path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def get_affine(self):
return self
get_affine.__doc__ = Transform.get_affine.__doc__
class Affine2DBase(AffineBase):
"""
The base class of all 2D affine transformations.
2D affine transformations are performed using a 3x3 numpy array::
a c e
b d f
0 0 1
This class provides the read-only interface. For a mutable 2D
affine transformation, use :class:`Affine2D`.
Subclasses of this class will generally only need to override a
constructor and :meth:`get_matrix` that generates a custom 3x3 matrix.
"""
input_dims = 2
output_dims = 2
#* Redundant: Removed for performance
#
# def __init__(self):
# Affine2DBase.__init__(self)
def frozen(self):
return Affine2D(self.get_matrix().copy())
frozen.__doc__ = AffineBase.frozen.__doc__
def _get_is_separable(self):
mtx = self.get_matrix()
return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
is_separable = property(_get_is_separable)
def __array__(self, *args, **kwargs):
return self.get_matrix()
def to_values(self):
"""
Return the values of the matrix as a sequence (a,b,c,d,e,f)
"""
mtx = self.get_matrix()
return tuple(mtx[:2].swapaxes(0, 1).flatten())
#@staticmethod
def matrix_from_values(a, b, c, d, e, f):
"""
(staticmethod) Create a new transformation matrix as a 3x3
numpy array of the form::
a c e
b d f
0 0 1
"""
return np.array([[a, c, e], [b, d, f], [0.0, 0.0, 1.0]], np.float_)
matrix_from_values = staticmethod(matrix_from_values)
def transform(self, points):
mtx = self.get_matrix()
if isinstance(points, MaskedArray):
tpoints = affine_transform(points.data, mtx)
return ma.MaskedArray(tpoints, mask=ma.getmask(points))
return affine_transform(points, mtx)
def transform_point(self, point):
mtx = self.get_matrix()
return affine_transform(point, mtx)
transform_point.__doc__ = AffineBase.transform_point.__doc__
if DEBUG:
_transform = transform
def transform(self, points):
            # The major speed trap here is just converting the
# points to an array in the first place. If we can use
# more arrays upstream, that should help here.
if (not ma.isMaskedArray(points) and
not isinstance(points, np.ndarray)):
warnings.warn(
('A non-numpy array of type %s was passed in for ' +
'transformation. Please correct this.')
                    % type(points))
return self._transform(points)
transform.__doc__ = AffineBase.transform.__doc__
transform_affine = transform
transform_affine.__doc__ = AffineBase.transform_affine.__doc__
def inverted(self):
if self._inverted is None or self._invalid:
mtx = self.get_matrix()
self._inverted = Affine2D(inv(mtx))
self._invalid = 0
return self._inverted
inverted.__doc__ = AffineBase.inverted.__doc__
class Affine2D(Affine2DBase):
"""
A mutable 2D affine transformation.
"""
def __init__(self, matrix = None):
"""
Initialize an Affine transform from a 3x3 numpy float array::
a c e
b d f
0 0 1
If *matrix* is None, initialize with the identity transform.
"""
Affine2DBase.__init__(self)
if matrix is None:
matrix = np.identity(3)
elif DEBUG:
matrix = np.asarray(matrix, np.float_)
assert matrix.shape == (3, 3)
self._mtx = matrix
self._invalid = 0
def __repr__(self):
return "Affine2D(%s)" % repr(self._mtx)
__str__ = __repr__
def __cmp__(self, other):
if (isinstance(other, Affine2D) and
(self.get_matrix() == other.get_matrix()).all()):
return 0
return -1
#@staticmethod
def from_values(a, b, c, d, e, f):
"""
(staticmethod) Create a new Affine2D instance from the given
values::
a c e
b d f
0 0 1
"""
return Affine2D(
np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], np.float_)
.reshape((3,3)))
from_values = staticmethod(from_values)
def get_matrix(self):
"""
Get the underlying transformation matrix as a 3x3 numpy array::
a c e
b d f
0 0 1
"""
self._invalid = 0
return self._mtx
def set_matrix(self, mtx):
"""
Set the underlying transformation matrix from a 3x3 numpy array::
a c e
b d f
0 0 1
"""
self._mtx = mtx
self.invalidate()
def set(self, other):
"""
Set this transformation from the frozen copy of another
:class:`Affine2DBase` object.
"""
assert isinstance(other, Affine2DBase)
self._mtx = other.get_matrix()
self.invalidate()
#@staticmethod
def identity():
"""
(staticmethod) Return a new :class:`Affine2D` object that is
the identity transform.
Unless this transform will be mutated later on, consider using
the faster :class:`IdentityTransform` class instead.
"""
return Affine2D(np.identity(3))
identity = staticmethod(identity)
def clear(self):
"""
Reset the underlying matrix to the identity transform.
"""
self._mtx = np.identity(3)
self.invalidate()
return self
def rotate(self, theta):
"""
Add a rotation (in radians) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
a = np.cos(theta)
b = np.sin(theta)
rotate_mtx = np.array(
[[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(rotate_mtx, self._mtx)
self.invalidate()
return self
def rotate_deg(self, degrees):
"""
Add a rotation (in degrees) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.rotate(degrees*np.pi/180.)
def rotate_around(self, x, y, theta):
"""
Add a rotation (in radians) around the point (x, y) in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.translate(-x, -y).rotate(theta).translate(x, y)
def rotate_deg_around(self, x, y, degrees):
"""
Add a rotation (in degrees) around the point (x, y) in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)
def translate(self, tx, ty):
"""
Adds a translation in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
translate_mtx = np.array(
[[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(translate_mtx, self._mtx)
self.invalidate()
return self
def scale(self, sx, sy=None):
"""
Adds a scale in place.
If *sy* is None, the same scale is applied in both the *x*- and
*y*-directions.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
if sy is None:
sy = sx
scale_mtx = np.array(
[[sx, 0.0, 0.0], [0.0, sy, 0.0], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(scale_mtx, self._mtx)
self.invalidate()
return self
def _get_is_separable(self):
mtx = self.get_matrix()
return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
is_separable = property(_get_is_separable)
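# --- Illustrative sketch (not part of the original module) ------------------
# Affine2D documents an in-place, chainable mutation API, and
# Transform.__add__ documents composition with the '+' operator.  This hedged
# sketch exercises both, using only names defined in this module; the numeric
# comments are approximate because of floating-point rounding.
def _example_affine2d_usage():
    """Minimal sketch of building, composing and inverting affine transforms."""
    # Rotate by 90 degrees about the origin, then shift by (2, 0).
    t = Affine2D().rotate_deg(90).translate(2.0, 0.0)
    p = t.transform_point((1.0, 0.0))             # ~ (2, 1)
    # '+' composes transforms: apply t first, then a pure scaling.
    composed = t + Affine2D().scale(10.0)
    q = composed.transform_point((1.0, 0.0))      # ~ (20, 10)
    # Round-tripping through the inverse recovers the original point.
    back = t.inverted().transform_point(p)        # ~ (1, 0)
    return p, q, back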
class IdentityTransform(Affine2DBase):
"""
    A special class that does one thing, the identity transform, in a
fast way.
"""
_mtx = np.identity(3)
def frozen(self):
return self
frozen.__doc__ = Affine2DBase.frozen.__doc__
def __repr__(self):
return "IdentityTransform()"
__str__ = __repr__
def get_matrix(self):
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def transform(self, points):
return points
transform.__doc__ = Affine2DBase.transform.__doc__
transform_affine = transform
transform_affine.__doc__ = Affine2DBase.transform_affine.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Affine2DBase.transform_non_affine.__doc__
def transform_path(self, path):
return path
transform_path.__doc__ = Affine2DBase.transform_path.__doc__
transform_path_affine = transform_path
transform_path_affine.__doc__ = Affine2DBase.transform_path_affine.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Affine2DBase.transform_path_non_affine.__doc__
def get_affine(self):
return self
get_affine.__doc__ = Affine2DBase.get_affine.__doc__
inverted = get_affine
inverted.__doc__ = Affine2DBase.inverted.__doc__
class BlendedGenericTransform(Transform):
"""
A "blended" transform uses one transform for the *x*-direction, and
another transform for the *y*-direction.
This "generic" version can handle any given child transform in the
*x*- and *y*-directions.
"""
input_dims = 2
output_dims = 2
is_separable = True
pass_through = True
def __init__(self, x_transform, y_transform):
"""
Create a new "blended" transform using *x_transform* to
transform the *x*-axis and *y_transform* to transform the
*y*-axis.
You will generally not call this constructor directly but use
the :func:`blended_transform_factory` function instead, which
can determine automatically which kind of blended transform to
create.
"""
# Here we ask: "Does it blend?"
Transform.__init__(self)
self._x = x_transform
self._y = y_transform
self.set_children(x_transform, y_transform)
self._affine = None
def _get_is_affine(self):
return self._x.is_affine and self._y.is_affine
is_affine = property(_get_is_affine)
def frozen(self):
return blended_transform_factory(self._x.frozen(), self._y.frozen())
frozen.__doc__ = Transform.frozen.__doc__
def __repr__(self):
return "BlendedGenericTransform(%s,%s)" % (self._x, self._y)
__str__ = __repr__
def transform(self, points):
x = self._x
y = self._y
if x is y and x.input_dims == 2:
return x.transform(points)
if x.input_dims == 2:
x_points = x.transform(points)[:, 0:1]
else:
x_points = x.transform(points[:, 0])
x_points = x_points.reshape((len(x_points), 1))
if y.input_dims == 2:
y_points = y.transform(points)[:, 1:]
else:
y_points = y.transform(points[:, 1])
y_points = y_points.reshape((len(y_points), 1))
if isinstance(x_points, MaskedArray) or isinstance(y_points, MaskedArray):
return ma.concatenate((x_points, y_points), 1)
else:
return np.concatenate((x_points, y_points), 1)
transform.__doc__ = Transform.transform.__doc__
def transform_affine(self, points):
return self.get_affine().transform(points)
transform_affine.__doc__ = Transform.transform_affine.__doc__
def transform_non_affine(self, points):
if self._x.is_affine and self._y.is_affine:
return points
return self.transform(points)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return BlendedGenericTransform(self._x.inverted(), self._y.inverted())
inverted.__doc__ = Transform.inverted.__doc__
def get_affine(self):
if self._invalid or self._affine is None:
if self._x.is_affine and self._y.is_affine:
if self._x == self._y:
self._affine = self._x.get_affine()
else:
x_mtx = self._x.get_affine().get_matrix()
y_mtx = self._y.get_affine().get_matrix()
# This works because we already know the transforms are
# separable, though normally one would want to set b and
# c to zero.
mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
self._affine = Affine2D(mtx)
else:
self._affine = IdentityTransform()
self._invalid = 0
return self._affine
get_affine.__doc__ = Transform.get_affine.__doc__
class BlendedAffine2D(Affine2DBase):
"""
A "blended" transform uses one transform for the *x*-direction, and
another transform for the *y*-direction.
This version is an optimization for the case where both child
transforms are of type :class:`Affine2DBase`.
"""
is_separable = True
def __init__(self, x_transform, y_transform):
"""
Create a new "blended" transform using *x_transform* to
transform the *x*-axis and *y_transform* to transform the
*y*-axis.
Both *x_transform* and *y_transform* must be 2D affine
transforms.
You will generally not call this constructor directly but use
the :func:`blended_transform_factory` function instead, which
can determine automatically which kind of blended transform to
create.
"""
assert x_transform.is_affine
assert y_transform.is_affine
assert x_transform.is_separable
assert y_transform.is_separable
Transform.__init__(self)
self._x = x_transform
self._y = y_transform
self.set_children(x_transform, y_transform)
Affine2DBase.__init__(self)
self._mtx = None
def __repr__(self):
return "BlendedAffine2D(%s,%s)" % (self._x, self._y)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
if self._x == self._y:
self._mtx = self._x.get_matrix()
else:
x_mtx = self._x.get_matrix()
y_mtx = self._y.get_matrix()
# This works because we already know the transforms are
# separable, though normally one would want to set b and
# c to zero.
self._mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def blended_transform_factory(x_transform, y_transform):
"""
Create a new "blended" transform using *x_transform* to transform
the *x*-axis and *y_transform* to transform the *y*-axis.
A faster version of the blended transform is returned for the case
where both child transforms are affine.
"""
if (isinstance(x_transform, Affine2DBase)
and isinstance(y_transform, Affine2DBase)):
return BlendedAffine2D(x_transform, y_transform)
return BlendedGenericTransform(x_transform, y_transform)
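# --- Illustrative sketch (not part of the original module) ------------------
# A common downstream use of blended_transform_factory() is to mix coordinate
# systems, e.g. x interpreted in data coordinates and y in axes-fraction
# coordinates.  The pyplot figure and axes below are assumptions about the
# calling code, not something this module provides.
def _example_blended_transform_usage():
    """Minimal sketch: a line spanning the full axes height at a data x."""
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2, 3], [10, 20, 15, 30])
    # x is taken from transData, y from transAxes (0 = bottom, 1 = top).
    trans = blended_transform_factory(ax.transData, ax.transAxes)
    ax.plot([2.0, 2.0], [0.0, 1.0], 'r--', transform=trans)
    return fig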
class CompositeGenericTransform(Transform):
"""
A composite transform formed by applying transform *a* then
transform *b*.
This "generic" version can handle any two arbitrary
transformations.
"""
pass_through = True
def __init__(self, a, b):
"""
Create a new composite transform that is the result of
applying transform *a* then transform *b*.
You will generally not call this constructor directly but use
the :func:`composite_transform_factory` function instead,
which can automatically choose the best kind of composite
transform instance to create.
"""
assert a.output_dims == b.input_dims
self.input_dims = a.input_dims
self.output_dims = b.output_dims
Transform.__init__(self)
self._a = a
self._b = b
self.set_children(a, b)
def frozen(self):
self._invalid = 0
frozen = composite_transform_factory(self._a.frozen(), self._b.frozen())
if not isinstance(frozen, CompositeGenericTransform):
return frozen.frozen()
return frozen
frozen.__doc__ = Transform.frozen.__doc__
def _get_is_affine(self):
return self._a.is_affine and self._b.is_affine
is_affine = property(_get_is_affine)
def _get_is_separable(self):
return self._a.is_separable and self._b.is_separable
is_separable = property(_get_is_separable)
def __repr__(self):
return "CompositeGenericTransform(%s, %s)" % (self._a, self._b)
__str__ = __repr__
def transform(self, points):
return self._b.transform(
self._a.transform(points))
transform.__doc__ = Transform.transform.__doc__
def transform_affine(self, points):
return self.get_affine().transform(points)
transform_affine.__doc__ = Transform.transform_affine.__doc__
def transform_non_affine(self, points):
if self._a.is_affine and self._b.is_affine:
return points
return self._b.transform_non_affine(
self._a.transform(points))
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
return self._b.transform_path(
self._a.transform_path(path))
transform_path.__doc__ = Transform.transform_path.__doc__
def transform_path_affine(self, path):
return self._b.transform_path_affine(
self._a.transform_path(path))
transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__
def transform_path_non_affine(self, path):
if self._a.is_affine and self._b.is_affine:
return path
return self._b.transform_path_non_affine(
self._a.transform_path(path))
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def get_affine(self):
if self._a.is_affine and self._b.is_affine:
return Affine2D(np.dot(self._b.get_affine().get_matrix(),
self._a.get_affine().get_matrix()))
else:
return self._b.get_affine()
get_affine.__doc__ = Transform.get_affine.__doc__
def inverted(self):
return CompositeGenericTransform(self._b.inverted(), self._a.inverted())
inverted.__doc__ = Transform.inverted.__doc__
class CompositeAffine2D(Affine2DBase):
"""
A composite transform formed by applying transform *a* then transform *b*.
This version is an optimization that handles the case where both *a*
and *b* are 2D affines.
"""
def __init__(self, a, b):
"""
Create a new composite transform that is the result of
applying transform *a* then transform *b*.
Both *a* and *b* must be instances of :class:`Affine2DBase`.
You will generally not call this constructor directly but use
the :func:`composite_transform_factory` function instead,
which can automatically choose the best kind of composite
transform instance to create.
"""
assert a.output_dims == b.input_dims
self.input_dims = a.input_dims
self.output_dims = b.output_dims
assert a.is_affine
assert b.is_affine
Affine2DBase.__init__(self)
self._a = a
self._b = b
self.set_children(a, b)
self._mtx = None
def __repr__(self):
return "CompositeAffine2D(%s, %s)" % (self._a, self._b)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
self._mtx = np.dot(
self._b.get_matrix(),
self._a.get_matrix())
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def composite_transform_factory(a, b):
"""
Create a new composite transform that is the result of applying
transform a then transform b.
    Shortcut versions of the composite transform are provided for the
case where both child transforms are affine, or one or the other
is the identity transform.
Composite transforms may also be created using the '+' operator,
e.g.::
c = a + b
"""
if isinstance(a, IdentityTransform):
return b
elif isinstance(b, IdentityTransform):
return a
elif isinstance(a, AffineBase) and isinstance(b, AffineBase):
return CompositeAffine2D(a, b)
return CompositeGenericTransform(a, b)
class BboxTransform(Affine2DBase):
"""
:class:`BboxTransform` linearly transforms points from one
:class:`Bbox` to another :class:`Bbox`.
"""
is_separable = True
def __init__(self, boxin, boxout):
"""
Create a new :class:`BboxTransform` that linearly transforms
points from *boxin* to *boxout*.
"""
assert boxin.is_bbox
assert boxout.is_bbox
Affine2DBase.__init__(self)
self._boxin = boxin
self._boxout = boxout
self.set_children(boxin, boxout)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransform(%s, %s)" % (self._boxin, self._boxout)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
inl, inb, inw, inh = self._boxin.bounds
outl, outb, outw, outh = self._boxout.bounds
x_scale = outw / inw
y_scale = outh / inh
if DEBUG and (x_scale == 0 or y_scale == 0):
raise ValueError("Transforming from or to a singular bounding box.")
self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale+outl)],
[0.0 , y_scale, (-inb*y_scale+outb)],
[0.0 , 0.0 , 1.0 ]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
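# --- Illustrative sketch (not part of the original module) ------------------
# BboxTransform maps points linearly from one Bbox to another.  This hedged
# example maps a "data" box onto a larger target box, using only names
# defined in this module.
def _example_bbox_transform_usage():
    """Minimal sketch of mapping between two bounding boxes."""
    boxin = Bbox.from_extents(0.0, 0.0, 10.0, 5.0)       # source (data) box
    boxout = Bbox.from_extents(0.0, 0.0, 640.0, 480.0)   # target box
    trans = BboxTransform(boxin, boxout)
    corners = trans.transform(boxin.corners())   # corners of boxin -> boxout
    center = trans.transform_point((5.0, 2.5))   # interior point -> (320, 240)
    return corners, center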
class BboxTransformTo(Affine2DBase):
"""
:class:`BboxTransformTo` is a transformation that linearly
transforms points from the unit bounding box to a given
:class:`Bbox`.
"""
is_separable = True
def __init__(self, boxout):
"""
Create a new :class:`BboxTransformTo` that linearly transforms
points from the unit bounding box to *boxout*.
"""
assert boxout.is_bbox
Affine2DBase.__init__(self)
self._boxout = boxout
self.set_children(boxout)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransformTo(%s)" % (self._boxout)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
outl, outb, outw, outh = self._boxout.bounds
if DEBUG and (outw == 0 or outh == 0):
raise ValueError("Transforming to a singular bounding box.")
self._mtx = np.array([[outw, 0.0, outl],
[ 0.0, outh, outb],
[ 0.0, 0.0, 1.0]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformFrom(Affine2DBase):
"""
:class:`BboxTransformFrom` linearly transforms points from a given
:class:`Bbox` to the unit bounding box.
"""
is_separable = True
def __init__(self, boxin):
assert boxin.is_bbox
Affine2DBase.__init__(self)
self._boxin = boxin
self.set_children(boxin)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransformFrom(%s)" % (self._boxin)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
inl, inb, inw, inh = self._boxin.bounds
if DEBUG and (inw == 0 or inh == 0):
raise ValueError("Transforming from a singular bounding box.")
x_scale = 1.0 / inw
y_scale = 1.0 / inh
self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale)],
[0.0 , y_scale, (-inb*y_scale)],
[0.0 , 0.0 , 1.0 ]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class ScaledTranslation(Affine2DBase):
"""
A transformation that translates by *xt* and *yt*, after *xt* and *yt*
    have been transformed by the given transform *scale_trans*.
"""
def __init__(self, xt, yt, scale_trans):
Affine2DBase.__init__(self)
self._t = (xt, yt)
self._scale_trans = scale_trans
self.set_children(scale_trans)
self._mtx = None
self._inverted = None
def __repr__(self):
return "ScaledTranslation(%s)" % (self._t,)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
xt, yt = self._scale_trans.transform_point(self._t)
self._mtx = np.array([[1.0, 0.0, xt],
[0.0, 1.0, yt],
[0.0, 0.0, 1.0]],
np.float_)
self._invalid = 0
self._inverted = None
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class TransformedPath(TransformNode):
"""
A :class:`TransformedPath` caches a non-affine transformed copy of
the :class:`~matplotlib.path.Path`. This cached copy is
automatically updated when the non-affine part of the transform
changes.
"""
def __init__(self, path, transform):
"""
Create a new :class:`TransformedPath` from the given
:class:`~matplotlib.path.Path` and :class:`Transform`.
"""
assert isinstance(transform, Transform)
TransformNode.__init__(self)
self._path = path
self._transform = transform
self.set_children(transform)
self._transformed_path = None
self._transformed_points = None
def _revalidate(self):
if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
or self._transformed_path is None):
self._transformed_path = \
self._transform.transform_path_non_affine(self._path)
self._transformed_points = \
Path(self._transform.transform_non_affine(self._path.vertices))
self._invalid = 0
def get_transformed_points_and_affine(self):
"""
Return a copy of the child path, with the non-affine part of
the transform already applied, along with the affine part of
the path necessary to complete the transformation. Unlike
:meth:`get_transformed_path_and_affine`, no interpolation will
be performed.
"""
self._revalidate()
return self._transformed_points, self.get_affine()
def get_transformed_path_and_affine(self):
"""
Return a copy of the child path, with the non-affine part of
the transform already applied, along with the affine part of
the path necessary to complete the transformation.
"""
self._revalidate()
return self._transformed_path, self.get_affine()
def get_fully_transformed_path(self):
"""
Return a fully-transformed copy of the child path.
"""
if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
or self._transformed_path is None):
self._transformed_path = \
self._transform.transform_path_non_affine(self._path)
self._invalid = 0
return self._transform.transform_path_affine(self._transformed_path)
def get_affine(self):
return self._transform.get_affine()
def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):
'''
Ensure the endpoints of a range are finite and not too close together.
"too close" means the interval is smaller than 'tiny' times
the maximum absolute value.
If they are too close, each will be moved by the 'expander'.
If 'increasing' is True and vmin > vmax, they will be swapped,
regardless of whether they are too close.
If either is inf or -inf or nan, return - expander, expander.
'''
if (not np.isfinite(vmin)) or (not np.isfinite(vmax)):
return -expander, expander
swapped = False
if vmax < vmin:
vmin, vmax = vmax, vmin
swapped = True
if vmax - vmin <= max(abs(vmin), abs(vmax)) * tiny:
if vmin == 0.0:
vmin = -expander
vmax = expander
else:
vmin -= expander*abs(vmin)
vmax += expander*abs(vmax)
if swapped and not increasing:
vmin, vmax = vmax, vmin
return vmin, vmax
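# --- Illustrative sketch (not part of the original module) ------------------
# A few hedged examples of the behaviour documented in nonsingular() above,
# using its default expander/tiny arguments.
def _example_nonsingular_usage():
    """Minimal sketch of how degenerate or reversed ranges are repaired."""
    a, b = nonsingular(5.0, 5.0)      # zero-width range   -> (4.995, 5.005)
    c, d = nonsingular(0.0, 0.0)      # all-zero range     -> (-0.001, 0.001)
    e, f = nonsingular(3.0, -2.0)     # reversed range is swapped -> (-2.0, 3.0)
    g, h = nonsingular(np.inf, 1.0)   # non-finite input   -> (-0.001, 0.001)
    return (a, b), (c, d), (e, f), (g, h)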
def interval_contains(interval, val):
a, b = interval
return (
((a < b) and (a <= val and b >= val))
or (b <= val and a >= val))
def interval_contains_open(interval, val):
a, b = interval
return (
((a < b) and (a < val and b > val))
or (b < val and a > val))
def offset_copy(trans, fig, x=0.0, y=0.0, units='inches'):
'''
Return a new transform with an added offset.
args:
trans is any transform
kwargs:
fig is the current figure; it can be None if units are 'dots'
x, y give the offset
units is 'inches', 'points' or 'dots'
'''
if units == 'dots':
return trans + Affine2D().translate(x, y)
if fig is None:
raise ValueError('For units of inches or points a fig kwarg is needed')
if units == 'points':
x /= 72.0
y /= 72.0
elif not units == 'inches':
raise ValueError('units must be dots, points, or inches')
return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
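# --- Illustrative sketch (not part of the original module) ------------------
# offset_copy() above returns a transform shifted by a fixed physical amount.
# The pyplot figure and axes below are assumptions about the calling code; a
# typical use is placing text a constant distance from a data point.
def _example_offset_copy_usage():
    """Minimal sketch: label a point 5 points right and 5 points up."""
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot([1.0], [2.0], 'o')
    trans = offset_copy(ax.transData, fig=fig, x=5.0, y=5.0, units='points')
    ax.text(1.0, 2.0, 'labeled point', transform=trans)
    return fig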
|
gpl-3.0
|
morrigan/user-behavior-anomaly-detector
|
src/ocsvm.py
|
1
|
5421
|
#!/usr/bin/python
import json
import lsanomaly
import matplotlib.pyplot as plt
import numpy as np
import pandas
from keras.preprocessing import sequence
from sklearn import metrics
from sklearn import svm
import helpers
class OCSVM:
def __init__(self):
self.settings = helpers.ConfigSectionMap('settings.ini', 'OCSVM')
self.data = helpers.ConfigSectionMap('settings.ini', 'Data')
def print_accuracy(self, title, datasetY, predictions):
        print(title)
print("accuracy: ", metrics.accuracy_score(datasetY, predictions))
print("precision: ", metrics.precision_score(datasetY, predictions))
print("recall: ", metrics.recall_score(datasetY, predictions))
print("f1: ", metrics.f1_score(datasetY, predictions))
print("area under curve (auc): ", metrics.roc_auc_score(datasetY, predictions))
    def replace_in_list(self, values, old_value, new_value):
        for n, i in enumerate(values):
            if i == old_value:
                values[n] = new_value
def save_parameters(self, model):
parameters = model.get_params()
parameters = json.dumps(parameters)
with open("model_ocsvm.json", "w") as json_file:
json_file.write(parameters)
print("Saved parameters to filesystem")
def load_parameters(self, model, model_filename = 'model_ocsvm.json'):
json_file = open(model_filename, 'r')
        loaded_parameters = json.load(json_file)
json_file.close()
print "Loaded parameters from filesystem."
        return model.set_params(**loaded_parameters)
def train_with_scikit(self, trainX, testX):
settings = self.settings
if (settings['load_parameters'] == True):
            clf = svm.OneClassSVM()
            clf = self.load_parameters(clf)
else:
clf = svm.OneClassSVM(nu=settings['nu'], kernel=settings['kernel'], gamma=settings['gamma'], verbose=settings['verbose'])
clf.fit(trainX)
y_pred_train = clf.predict(trainX)
y_pred_test = clf.predict(testX)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
return y_pred_train, y_pred_test, n_error_train, n_error_test
def train_with_lsanomaly(self, trainX, testX):
anomalymodel = lsanomaly.LSAnomaly()
anomalymodel.fit(trainX)
y_pred_train = anomalymodel.predict(trainX)
y_pred_test = anomalymodel.predict(testX)
# Process results
self.replace_in_list(y_pred_train, 'anomaly', -1)
self.replace_in_list(y_pred_test, 'anomaly', -1)
n_error_train = y_pred_train.count(-1)
n_error_test = y_pred_test.count(-1)
return y_pred_train, y_pred_test, n_error_train, n_error_test
def run(self):
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
max_vector_length = 30
# Create datasets
train_dataset = pandas.read_csv(self.data['train_dataset_file'], delimiter=';', engine='python')
test_dataset = pandas.read_csv(self.data['test_dataset_file'], delimiter=';', engine='python')
        train_dataset = train_dataset[:len(train_dataset) // 6]
# Convert strings
train_dataset_array = helpers.collection_values_to_array(train_dataset)
test_dataset_array = helpers.collection_values_to_array(test_dataset)
# Padding (from left)
trainX = sequence.pad_sequences(train_dataset_array, maxlen=max_vector_length)
testX = sequence.pad_sequences(test_dataset_array, maxlen=max_vector_length) #padding='pre'
assert (trainX.shape[1] == testX.shape[1])
# fit the model
if (self.settings['use_lsanomaly'] == True):
y_pred_train, y_pred_test, n_error_train, n_error_test = self.train_with_lsanomaly(trainX, testX)
else:
y_pred_train, y_pred_test, n_error_train, n_error_test = self.train_with_scikit(trainX, testX)
#testX_plot = []
#for n, i in enumerate(testX):
# for m, j in enumerate(testX):
# if i >= 0:
# testX_plot.append(n)
#plt.set_cmap(plt.cm.Paired)
#plt.scatter(trainX[y_pred_train>0], trainX[y_pred_train>0], c='black', label='inliers')
#plt.scatter(trainX[y_pred_train <= 0], trainX[y_pred_train <= 0], c='red', label='outliers')
#plt.scatter(testX_plot, testX_plot, c='black', label='inliers')
#plt.scatter(testX[y_pred_test < 0], testX[y_pred_test < 0], c='red', label='outliers')
#plt.axis('tight')
#plt.legend()
#plt.show()
# Visualize
plt.title("Novelty Detection")
plt.figure(1)
plt.subplot(211)
plt.plot(trainX, 'ro', testX, 'g^')
plt.subplot(212)
plt.plot(y_pred_train, 'ro', y_pred_test, 'g^')
plt.xlabel(
"Anomalies in training set: %d/%d; Anomalies in test set: %d/%d;"
% (n_error_train, trainX.shape[0], n_error_test, testX.shape[0]))
plt.show()
# Display accuracy on validation set
#print_accuracy("Validation", testX, y_pred_test)
#plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
#a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')
#plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')
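# --- Illustrative sketch (not part of the original script) ------------------
# A hedged example of how this class is expected to be driven: configuration
# comes from settings.ini (read in __init__ above) and the dataset paths from
# its [Data] section, so the only wiring needed is constructing OCSVM and
# calling run().  Guarded so it only executes when the file is run directly.
if __name__ == '__main__':
    detector = OCSVM()
    detector.run()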
|
mit
|
richter-t/espresso
|
doc/tutorials/python/08-visualization/scripts/visualization.py
|
4
|
3576
|
from __future__ import print_function
import espressomd._system as es
import espressomd
from espressomd import visualization
import numpy
from matplotlib import pyplot
from threading import Thread
# System parameters
#############################################################
# 10 000 Particles
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System()
system.time_step = 0.01
system.cell_system.skin = 0.4
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
# integration
int_steps = 1000
int_n_times = 100
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.box_l = [box_l, box_l, box_l]
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.non_bonded_inter.set_force_cap(lj_cap)
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=numpy.random.random(3) * system.box_l)
system.analysis.distto(0)
act_min_dist = system.analysis.mindist()
system.cell_system.max_num_cells = 2744
visualizer = visualization.mayaviLive(system)
#visualizer = visualization.openGLLive(system)
#############################################################
# Warmup Integration #
#############################################################
# set LJ cap
lj_cap = 20
system.non_bonded_inter.set_force_cap(lj_cap)
# Warmup Integration Loop
i = 0
while (i < warm_n_times and act_min_dist < min_dist):
system.integrator.run(warm_steps)
# Warmup criterion
act_min_dist = system.analysis.mindist()
i += 1
# Increase LJ cap
lj_cap = lj_cap + 10
system.non_bonded_inter.set_force_cap(lj_cap)
#############################################################
# Integration #
#############################################################
# remove force capping
lj_cap = 0
system.non_bonded_inter.set_force_cap(lj_cap)
energies = numpy.empty((int_n_times, 2))
current_time = -1
pyplot.xlabel("time")
pyplot.ylabel("energy")
plot, = pyplot.plot([0],[0])
pyplot.show(block=False)
def update_plot():
if current_time < 0:
return
i = current_time
plot.set_xdata(energies[:i+1,0])
plot.set_ydata(energies[:i+1,1])
pyplot.xlim(0, energies[i,0])
pyplot.ylim(energies[:i+1,1].min(), energies[:i+1,1].max())
pyplot.draw()
def main():
global current_time
for i in range(0, int_n_times):
print("run %d at time=%f " % (i, system.time))
system.integrator.run(int_steps)
energies[i] = (system.time, system.analysis.energy()['total'])
current_time = i
visualizer.update()
t = Thread(target=main)
t.daemon = True
t.start()
visualizer.register_callback(update_plot, interval=500)
visualizer.start()
# terminate program
print("\nFinished.")
|
gpl-3.0
|
eickenberg/scikit-learn
|
examples/linear_model/plot_lasso_model_selection.py
|
311
|
5431
|
"""
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom, is derived for
large samples (asymptotic results) and assumes the model is correct,
i.e. that the data are actually generated by this model. These
criteria also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. It is also able to
compute the full path without setting any meta parameter. By contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large number of them. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
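##############################################################################
# Illustrative sketch (not part of the original example): the note above about
# nested cross-validation can be made concrete by scoring LassoCV itself with
# an outer CV loop.  The import below assumes a scikit-learn version that
# provides sklearn.model_selection; older releases exposed cross_val_score
# from sklearn.cross_validation instead.
def _nested_cv_sketch(X, y):
    from sklearn.model_selection import cross_val_score
    inner_model = LassoCV(cv=20)                 # alpha chosen by the inner CV
    outer_scores = cross_val_score(inner_model, X, y, cv=5)  # outer evaluation
    return outer_scores.mean(), outer_scores.std()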
|
bsd-3-clause
|
cfjhallgren/shogun
|
applications/easysvm/tutpaper/svm_params.py
|
12
|
12908
|
#from matplotlib import rc
#rc('text', usetex=True)
fontsize = 16
contourFontsize = 12
showColorbar = False
xmin = -1
xmax = 1
ymin = -1.05
ymax = 1
import sys,os
import numpy
import shogun
from shogun import GaussianKernel, LinearKernel, PolyKernel
from shogun import RealFeatures, BinaryLabels
from shogun import LibSVM
from numpy import arange
import matplotlib
from matplotlib import pylab
pylab.rcParams['contour.negative_linestyle'] = 'solid'
def features_from_file(fileName) :
fileHandle = open(fileName)
fileHandle.readline()
features = []
labels = []
for line in fileHandle :
tokens = line.split(',')
labels.append(float(tokens[1]))
features.append([float(token) for token in tokens[2:]])
return RealFeatures(numpy.transpose(numpy.array(features))), features, BinaryLabels(numpy.array(labels,numpy.float))
def create_kernel(kname, features, kparam=None) :
if kname == 'gauss' :
kernel = GaussianKernel(features, features, kparam)
elif kname == 'linear':
kernel = LinearKernel(features, features)
elif kname == 'poly' :
kernel = PolyKernel(features, features, kparam, True, False)
return kernel
def svm_train(kernel, labels, C1, C2=None):
"""Trains a SVM with the given kernel"""
num_threads = 1
kernel.io.disable_progress()
svm = LibSVM(C1, kernel, labels)
if C2:
svm.set_C(C1, C2)
svm.parallel.set_num_threads(num_threads)
svm.io.disable_progress()
svm.train()
return svm
def svm_test(svm, kernel, features_train, features_test) :
"""predicts on the test examples"""
kernel.init(features_train, features_test)
output = svm.apply().get_labels()
return output
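# --- Illustrative sketch (not part of the original script) ------------------
# The helpers above are meant to be chained: load features and labels, build a
# kernel on the training features, train LibSVM, then predict on (possibly
# different) test features.  The data file path is the one used by
# create_figures() further below; kernel name and parameters are arbitrary
# choices for illustration.
def _example_train_and_test():
    features, vectors, labels = features_from_file('data/small_gc_toy.data')
    kernel = create_kernel('gauss', features, kparam=1.0)
    svm = svm_train(kernel, labels, 1.0)
    # The training set doubles as the test set here, just to show the call.
    outputs = svm_test(svm, kernel, features, features)
    return outputs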
def decision_boundary_plot(svm, features, vectors, labels, kernel, fileName = None, **args) :
title = None
if 'title' in args :
title = args['title']
xlabel = None
if 'xlabel' in args :
xlabel = args['xlabel']
ylabel = None
if 'ylabel' in args :
ylabel = args['ylabel']
fontsize = 'medium'
if 'fontsize' in args :
fontsize = args['fontsize']
contourFontsize = 10
if 'contourFontsize' in args :
contourFontsize = args['contourFontsize']
showColorbar = True
if 'showColorbar' in args :
showColorbar = args['showColorbar']
show = True
if fileName is not None :
show = False
if 'show' in args :
show = args['show']
# setting up the grid
delta = 0.005
x = arange(xmin, xmax, delta)
y = arange(ymin, ymax, delta)
Z = numpy.zeros((len(x), len(y)), numpy.float_)
gridX = numpy.zeros((len(x) *len(y), 2), numpy.float_)
n = 0
for i in range(len(x)) :
for j in range(len(y)) :
gridX[n][0] = x[i]
gridX[n][1] = y[j]
n += 1
if kernel.get_name() == 'Linear' and 'customwandb' in args:
kernel.init_optimization_svm(svm)
b=svm.get_bias()
w=kernel.get_w()
kernel.set_w(args['customwandb'][0])
svm.set_bias(args['customwandb'][1])
if kernel.get_name() == 'Linear' and 'drawarrow' in args:
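        # visualize the normal vector of the separating hyperplane w.x + b = 0: the
        # arrow starts where the boundary crosses the y-axis (x=0, y=-b/w[1]) and
        # points along w, scaled down so it fits inside the plot.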
kernel.init_optimization_svm(svm)
b=svm.get_bias()
w=kernel.get_w()
s=1.0/numpy.dot(w,w)/1.17
pylab.arrow(0,-b/w[1], w[0]*s,s*w[1], width=0.01, fc='#dddddd', ec='k')
grid_features = RealFeatures(numpy.transpose(gridX))
results = svm_test(svm, kernel, features, grid_features)
n = 0
for i in range(len(x)) :
for j in range(len(y)) :
Z[i][j] = results[n]
n += 1
cdict = {'red' :((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
'green':((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
'blue' :((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
}
my_cmap = matplotlib.colors.LinearSegmentedColormap('lightgray',cdict,256)
im = pylab.imshow(numpy.transpose(Z),
interpolation='bilinear', origin='lower',
cmap=my_cmap, extent=(xmin,xmax,ymin,ymax) )
if 'decisionboundaryonly' in args:
C1 = pylab.contour(numpy.transpose(Z),
[0],
origin='lower',
linewidths=(3),
colors = ['k'],
extent=(xmin,xmax,ymin,ymax))
else:
C1 = pylab.contour(numpy.transpose(Z),
[-1,0,1],
origin='lower',
linewidths=(1,3,1),
colors = ['k','k'],
extent=(xmin,xmax,ymin,ymax))
pylab.clabel(C1,
inline=1,
fmt='%1.1f',
fontsize=contourFontsize)
# plot the data
lab=labels.get_labels()
vec=numpy.array(vectors)
idx=numpy.where(lab==-1)[0]
pylab.scatter(vec[idx,0], vec[idx,1], s=300, c='#4444ff', marker='o', alpha=0.8, zorder=100)
idx=numpy.where(lab==+1)[0]
pylab.scatter(vec[idx,0], vec[idx,1], s=500, c='#ff4444', marker='s', alpha=0.8, zorder=100)
# plot SVs
if not 'decisionboundaryonly' in args:
training_outputs = svm_test(svm, kernel, features, features)
sv_idx=numpy.where(abs(training_outputs)<=1.01)[0]
pylab.scatter(vec[sv_idx,0], vec[sv_idx,1], s=100, c='k', marker='o', alpha=0.8, zorder=100)
if 'showmovedpoint' in args:
x=-0.779838709677
y=-0.1375
pylab.scatter([x], [y], s=300, c='#4e4e61', marker='o', alpha=1, zorder=100, edgecolor='#454548')
pylab.arrow(x,y-0.1, 0, -0.8/1.5, width=0.01, fc='#dddddd', ec='k')
#pylab.show()
if title is not None :
pylab.title(title, fontsize=fontsize)
if ylabel:
pylab.ylabel(ylabel,fontsize=fontsize)
if xlabel:
pylab.xlabel(xlabel,fontsize=fontsize)
if showColorbar :
pylab.colorbar(im)
# colormap:
pylab.hot()
if fileName is not None :
pylab.savefig(fileName)
if show :
pylab.show()
def add_percent_ticks():
ticks=pylab.getp(pylab.gca(),'xticks')
ticklabels=len(ticks)*['']
ticklabels[0]='0%'
ticklabels[-1]='100%'
pylab.setp(pylab.gca(), xticklabels=ticklabels)
pylab.setp(pylab.gca(), yticklabels=['0%','100%'])
ticks=pylab.getp(pylab.gca(),'yticks')
ticklabels=len(ticks)*['']
#ticklabels[0]='0%'
ticklabels[-1]='100%'
pylab.setp(pylab.gca(), yticklabels=ticklabels)
xticklabels = pylab.getp(pylab.gca(), 'xticklabels')
yticklabels = pylab.getp(pylab.gca(), 'yticklabels')
pylab.setp(xticklabels, fontsize=fontsize)
pylab.setp(yticklabels, fontsize=fontsize)
def create_figures(extension = 'pdf', directory = '../../tex/figures') :
if extension[0] != '.' :
extension = '.' + extension
dpi=90
# data and linear decision boundary
features,vectors,labels = features_from_file('data/small_gc_toy.data')
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 0.7)
pylab.figure(figsize=(8,6), dpi=dpi)
decision_boundary_plot(svm, features, vectors, labels, kernel,
fontsize=fontsize, contourFontsize=contourFontsize,
title="Linear Separation", customwandb=(numpy.array([-0.05, -1.0]), -0.3),
ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
show=False, showColorbar=showColorbar, decisionboundaryonly=True)
add_percent_ticks()
pylab.savefig(os.path.join(directory, 'data_and_linear_classifier' + extension))
pylab.close()
#####################################################################################
# data and svm decision boundary
features,vectors,labels = features_from_file('data/small_gc_toy.data')
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 100)
pylab.figure(figsize=(8,6), dpi=dpi)
decision_boundary_plot(svm, features, vectors, labels, kernel,
fontsize=fontsize, contourFontsize=contourFontsize,
title="Maximum Margin Separation", drawarrow=True,
ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
show=False, showColorbar=showColorbar)
add_percent_ticks()
pylab.savefig(os.path.join(directory, 'data_and_svm_classifier' + extension))
pylab.close()
#####################################################################################
# the effect of C on the decision surface:
features,vectors,labels = features_from_file('data/small_gc_toy_outlier.data')
pylab.figure(figsize=(16,6), dpi=dpi)
pylab.subplot(121)
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 200)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title = 'Soft-Margin with C=200', ylabel="GC Content Before 'AG'",
xlabel="GC Content After 'AG'", fontsize=fontsize,
contourFontsize=contourFontsize, show=False, showmovedpoint=True,
showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(122)
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 2)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title = 'Soft-Margin with C=2',
ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False, showColorbar=showColorbar)
add_percent_ticks()
#pylab.subplots_adjust(bottom=0.05, top=0.95)
pylab.savefig(os.path.join(directory, 'effect_of_c' + extension))
pylab.close()
####################################################################################
# playing with nonlinear data:
# the effect of kernel parameters
features,vectors,labels = features_from_file('data/small_gc_toy_outlier.data')
pylab.figure(figsize=(24,6), dpi=dpi)
pylab.subplot(131)
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title = 'Linear Kernel',
ylabel="GC Content Before 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(132)
kernel = create_kernel('poly', features, 2)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Polynomial Kernel d=2',
xlabel="GC Content After 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(133)
kernel = create_kernel('poly', features, 5)
svm = svm_train(kernel, labels, 10)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Polynomial Kernel d=5',
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
#pylab.subplots_adjust(bottom=0.05, top=0.95)
pylab.savefig(os.path.join(directory, 'params_polynomial' + extension))
pylab.close()
####################################################################################
#effects of sigma
pylab.figure(figsize=(24,6), dpi=dpi)
pylab.subplot(131)
    sigma = 20.0
kernel = create_kernel('gauss', features, sigma)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Gaussian Kernel Sigma=20',
ylabel="GC Content Before 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(132)
sigma = 1.0
kernel = create_kernel('gauss', features, sigma)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Gaussian Kernel Sigma=1',
xlabel="GC Content After 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(133)
sigma = 0.05
kernel = create_kernel('gauss', features, sigma)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Gaussian Kernel Sigma=0.05',
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
#pylab.subplots_adjust(bottom=0.05, top=0.95)
pylab.savefig(os.path.join(directory, 'params_gaussian' + extension))
pylab.close()
####################################################################################
if __name__ == '__main__' :
extension = 'pdf'
if len(sys.argv) > 1 :
extension = sys.argv[1]
pylab.ioff()
create_figures(extension)
|
gpl-3.0
|
sameersingh/onebusaway
|
ml/oba_ml/common.py
|
1
|
2239
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
def get_feature_names():
return open("features_names.txt").read().splitlines()
def get_data(filename):
data = np.loadtxt(filename)
y = np.array(data[:,data.shape[1]-1]).reshape(data.shape[0],1)
x = np.array(data[:,0:data.shape[1]-1])
return (x, y)
def get_rmse(y, y_hat):
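    # assumption from how err_oba is computed below: y already stores the deviation
    # from the OneBusAway (OBA) baseline prediction, so the RMS of y itself is the
    # baseline error, while y_hat - y is the error of our model.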
err_our = y_hat - y
rmse_our = np.sqrt(np.mean(err_our**2))
err_oba = y
rmse_oba = np.sqrt(np.mean(err_oba**2))
return (rmse_our, rmse_oba)
def save_scatter_plot(y, y_hat, set_name):
fig = plt.figure()
fig.suptitle("Y vs. Y_hat - {} set".format(set_name), fontsize=13, fontweight='bold')
plt.scatter(y, y_hat)
plt.xlabel("y")
plt.ylabel("y_hat")
plt.savefig("y_vs_yhat_{}.png".format(set_name))
def save_histogram(y_yhat, set_name):
fig = plt.figure()
fig.suptitle("Y-Yhat Histogram - {} set".format(set_name))
plt.hist(y_yhat, 10)
plt.savefig("y_yhat_histogram_{}.png".format(set_name))
def report_results(y_hat_train, y_hat_test, y, y_test, w, feature_names):
    # assumed convenience wrapper: writes the prediction CSVs, prints the learned
    # weights, and reports the target range using the helpers defined below
    build_output_files(y_hat_train, y_hat_test, y, y_test)
    print_weights(w, feature_names)
    report_range(y)
def build_output_files(y_hat_train, y_hat_test, y, y_test):
feature_names_train = open("labels_training.txt").read().splitlines()
appended_train = np.column_stack((feature_names_train, y_hat_train))
np.savetxt("y_hat_train.csv", appended_train, delimiter=",", fmt="%s")
feature_names_test = open("labels_test.txt").read().splitlines()
appended_test = np.column_stack((feature_names_test, y_hat_test))
np.savetxt("y_hat_test.csv", appended_test, delimiter=",", fmt="%s")
appended_train_y = np.column_stack((feature_names_train, y))
np.savetxt("y_train.csv", appended_train_y, delimiter=",", fmt="%s")
appended_test_y = np.column_stack((feature_names_test, y_test))
np.savetxt("y_test.csv", appended_test_y, delimiter=",", fmt="%s")
def print_weights(w, feature_names):
print np.column_stack((w, feature_names))
def report_range(y):
print "ymin {} - index {}".format(y[np.argmin(y[:,0]),0], np.argmin(y[:,0]))
print "ymax {} - index {}".format(y[np.argmax(y[:,0]),0], np.argmax(y[:,0]))
|
apache-2.0
|
jayshonzs/ESL
|
EM/MixtrueGaussian.py
|
1
|
3860
|
'''
Created on 2014-8-27
@author: xiajie
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
def simulate_data():
data_set = []
centers = [(0.18,0.4),(0.5,0.5),(0.82,0.6)]
cov0 = [[0.01,.005],[.005,0.01]]
cov1 = [[.01,-0.005],[-0.005,.01]]
cov2 = [[0.01,.005],[.005,0.01]]
cov = [cov0, cov1, cov2]
num = [150, 200, 150]
for k in range(len(centers)):
center = centers[k]
data = np.zeros((num[k], 2))
print data.shape
for i in range(num[k]):
x = np.random.multivariate_normal(center, cov[k], 1)
data[i] = x
data_set.append(data)
return data_set, 500
def norm(x, mu, cov):
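    # multivariate Gaussian density N(x | mu, cov):
    # (2*pi)^(-D/2) * |cov|^(-1/2) * exp(-0.5 * (x - mu)^T cov^{-1} (x - mu))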
c = x - mu
cov_i = np.linalg.inv(cov)
cov_d = np.linalg.det(cov)
D = len(x)
normalization = 1.0/((2*np.pi)**D*cov_d)**0.5
return normalization*np.exp(-0.5*(np.transpose(c).dot(cov_i).dot(c)))
def expectation(X, pi, mu, cov, K):
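    # E-step: responsibility of component k for point x_i,
    # gamma_{ik} = pi_k * N(x_i | mu_k, cov_k) / sum_j pi_j * N(x_i | mu_j, cov_j)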
responsibility = np.zeros((len(X), K))
for i in range(len(X)):
x = X[i]
for k in range(K):
numerator = pi[k]*norm(x, mu[k], cov[k])
denominator = 0.
for j in range(K):
denominator += pi[j]*norm(x, mu[j], cov[j])
res = numerator/denominator
responsibility[i, k] = res
return responsibility
def maximization(X, mu, resp, K=3):
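    # M-step: with N_k = sum_i gamma_{ik},
    #   mu_k  = (1/N_k) * sum_i gamma_{ik} * x_i
    #   cov_k = (1/N_k) * sum_i gamma_{ik} * (x_i - mu_k)(x_i - mu_k)^T
    #   pi_k  = N_k / N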
N = len(X)
Nk = []
D = len(X[0])
new_cov = []
new_pi = []
for k in range(K):
Nk.append(np.sum(resp[:, k], axis=0))
for k in range(K):
s = 0.
for i in range(N):
s += resp[i, k]*X[i]
mu[k] = s/Nk[k]
for k in range(K):
s = np.zeros((D, D))
for i in range(N):
s += resp[i, k]*np.outer((X[i]-mu[k]), np.transpose((X[i]-mu[k])))
new_cov.append(s/Nk[k])
for k in range(K):
new_pi.append(Nk[k]/N)
return mu, new_cov, new_pi
def likelihood(X, mu, cov, pi, K=3):
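    # log-likelihood of the mixture: sum_i log( sum_k pi_k * N(x_i | mu_k, cov_k) )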
N = len(X)
lkhood = 0.
for i in range(N):
x = X[i]
inner = 0.
for k in range(K):
inner += pi[k]*norm(x, mu[k], cov[k])
inner = np.log(inner)
lkhood += inner
return lkhood
def EM(X, K=3):
cov = [np.array([[0.01, 0.], [0, 0.01]])]*3
mu = np.array([[0.25, 0.5], [0.5, 0.5], [0.75, 0.5]])
pi = [0.33, 0.33, 0.34]
lkhood = 0.
while True:
resp = expectation(X, pi, mu, cov, K)
mu, cov, pi = maximization(X, mu, resp)
new_lkhood = likelihood(X, mu, cov, pi)
if np.abs(new_lkhood-lkhood) < 0.0001:
break
        lkhood = new_lkhood
return mu, cov, pi, resp
#for testing simulated data
def cook_data(data_set):
data = np.zeros((500,2))
classes = np.zeros(len(data))
idx = 0
for i in range(len(data_set)):
for j in range(len(data_set[i])):
data[idx] = data_set[i][j]
classes[idx] = i
idx += 1
return data, classes
def draw(X, classes, colored=True):
mycm = mpl.cm.get_cmap('Paired')
if colored == False:
classes = np.zeros(len(classes))
plt.scatter(X[:, 0], X[:, 1], s=50, c=classes, cmap=mycm)
plt.show()
def max_c(resp):
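    # hard assignment: linear scan for the component with the largest responsibility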
max_r = 0.
best_i = None
for i in range(len(resp)):
if resp[i] > max_r:
max_r = resp[i]
best_i = i
return best_i
if __name__ == '__main__':
    data, N = simulate_data()
    X, cls = cook_data(data)
    print data
    print cls
    draw(X, cls, False)
mu, cov, pi, resp = EM(X)
print "mu:"
print mu
print "cov0:"
print cov[0]
print "cov1:"
print cov[1]
print "cov2:"
print cov[2]
print pi
for i in range(N):
cls[i] = max_c(resp[i])
draw(X, cls, True)
|
mit
|
LiaoPan/scikit-learn
|
sklearn/decomposition/nmf.py
|
16
|
19101
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
    eps : float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
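            # Armijo-style sufficient decrease check (Lin, 2007): for this quadratic
            # objective f(Hn) - f(H) = <grad, d> + 0.5 * d^T WtW d exactly, and the
            # step is accepted when that drop is at most sigma * <grad, d>.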
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
|
bsd-3-clause
|
sergpolly/Thermal_adapt_scripts
|
narrow_to_complete_genomes.py
|
1
|
4994
|
import re
import os
import sys
from Bio import Seq
from Bio import SeqIO
from Bio import Entrez as ncbi
import xml.etree.ElementTree as ET
import pandas as pd
import itertools
import numpy as np
import copy
# path = os.path.join(os.path.expanduser('~'),'GENOMES_BACTER/ftp.ncbi.nih.gov/refseq/release/bacteria/genbank')
path = os.path.join(os.path.expanduser('~'),'GENOMES_BACTER_RELEASE69/genbank')
env_fname = "env_catalog_bacter.dat"
descriptions_fname = "genbank.inventory.description"
env_df = pd.read_csv(os.path.join(path,env_fname))
descriptions = pd.read_csv(os.path.join(path,descriptions_fname))
# Expect some silly warnings, like this:
# Columns (11) have mixed types
bio_match = re.compile("BioProject:(.+)")
taxon_match = re.compile("taxon:(.+)")
def extract_bio_uid(dbxref):
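    # pulls the numeric BioProject UID out of a DbxRefs string, e.g. (values
    # hypothetical) "BioProject:PRJNA224116; taxon:562" -> 224116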
uids = str(dbxref).strip().replace(';',' ').split(' ')
matched_uids = [bio_match.match(uid) for uid in uids if bio_match.match(uid)]
# bio_match.match(uid) is executed twice here, but who cares ...
if matched_uids:
accession = matched_uids[0].groups()[0]
uid = re.match("[A-Z]+(\d+)",accession).groups()[0]
return int(uid)
# return just a single BioProject uid, I believe that would be correct 100% of times
else:
return np.nan
def extract_taxon(dbxref):
uids = str(dbxref).strip().replace(';',' ').split(' ')
matched_txs = [taxon_match.match(uid) for uid in uids if taxon_match.match(uid)]
# taxon_match.match(uid) is executed twice here, but who cares ...
if matched_txs:
uid = matched_txs[0].groups()[0]
return int(uid)
# return just a single taxon uid, I believe that would be correct 100% of times
else:
return np.nan
# extract solely BioProject uid first ...
descriptions['BioProject'] = descriptions['DbxRefs'].apply(extract_bio_uid)
# extract solely Taxonomy uid first ...
descriptions['TaxonID'] = descriptions['SourceDbxRefs'].apply(extract_taxon)
# make BioProject UID an index
env_df = env_df.set_index('BioProject', drop=True, append=False)
# DB-style outter join of the tables
des = descriptions.join(env_df, on='BioProject', lsuffix='_des', rsuffix='_env')
# Entries with known temperature ...
des_temp = des[(des.OptimumTemperature.notnull())]
# get those that are not plasmids
noplasm_idx = des_temp.SourcePlasmid.isnull().nonzero()[0]
des_noplasmid = copy.deepcopy(des_temp.iloc[noplasm_idx])
# criteria for the Complete Genome ...
crit_NC = lambda name: name[:3] == 'NC_'
crit_comp_genome = lambda descr: bool(re.search('complete genome', descr))
crit_features = lambda nf: nf >= 900
crit_genlen = lambda genlen: genlen >= 300000
crit_WGS = lambda keywords: not re.search('WGS', keywords)
crit_plasmid = lambda descr: not re.search('plasmid', descr)
# evaluate those criteria ...
des_noplasmid['crit_NC'] = des_noplasmid.GenomicID.apply(crit_NC)
des_noplasmid['crit_WGS'] = des_noplasmid.Keywords.apply(crit_WGS)
des_noplasmid['crit_genlen'] = des_noplasmid.GenomicLen.apply(crit_genlen)
des_noplasmid['crit_features'] = des_noplasmid.FeaturesNum.apply(crit_features)
des_noplasmid['crit_comp_genome'] = des_noplasmid.Description.apply(crit_comp_genome)
des_noplasmid['crit_plasmid'] = des_noplasmid.Description.apply(crit_plasmid)
# get those items that satisfy these criteria ...
criteria_to_satisfy = ['crit_NC', 'crit_WGS', 'crit_genlen', 'crit_features', 'crit_comp_genome', 'crit_plasmid']
indexes_satisfy = (des_noplasmid[criteria_to_satisfy].sum(axis=1)==len(criteria_to_satisfy)).nonzero()[0]
# des_valid = copy.deepcopy(des_noplasmid.iloc[indexes_satisfy].OptimumTemperature.notnull())
des_valid = copy.deepcopy(des_noplasmid.iloc[indexes_satisfy])
# now collapse non-unique things,
# there shouldn't be many of them, at this point
# [CHECK] - just 3 of them found for the RELEASE69 ...
cleaned_data = des_valid.drop_duplicates(subset='TaxonID')
check_bp = cleaned_data.duplicated(subset='BioProject').nonzero()[0].size
check_org1 = cleaned_data.duplicated(subset='Organism_des').nonzero()[0].size
check_org2 = cleaned_data.duplicated(subset='Organism_env').nonzero()[0].size
if (check_bp+check_org1+check_org2)==0:
print "Data looks extremely clean: no duplicates, complete genomes only, opt temps available ..."
elif check_bp:
print "WARNING: There are duplicated BioProject UIDs in the final data"
elif check_org1:
print "WARNING: There are duplicated Organisms in the final data"
elif check_org2:
print "WARNING: There are duplicated Organisms in the final data"
# some information summary ...
thermophiles_counts = (cleaned_data.OptimumTemperature>=50).nonzero()[0].size
total_counts = cleaned_data.shape[0]
print "Cleaned data summary: %d thermophiles (OGT>=50) out of %d species in total."
# GenomicID is the best candidate to be an index ...
cleaned_data.to_csv("env_catalog_compgenome.dat",index=False)
|
mit
|
pschella/scipy
|
scipy/spatial/kdtree.py
|
23
|
38086
|
# Copyright Anne M. Archibald 2008
# Released under the scipy license
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from heapq import heappush, heappop
import scipy.sparse
__all__ = ['minkowski_distance_p', 'minkowski_distance',
'distance_matrix',
'Rectangle', 'KDTree']
def minkowski_distance_p(x, y, p=2):
"""
Compute the p-th power of the L**p distance between two arrays.
For efficiency, this function computes the L**p distance but does
not extract the pth root. If `p` is 1 or infinity, this is equal to
the actual L**p distance.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance_p
>>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]])
array([2, 1])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf:
return np.amax(np.abs(y-x), axis=-1)
elif p == 1:
return np.sum(np.abs(y-x), axis=-1)
else:
return np.sum(np.abs(y-x)**p, axis=-1)
def minkowski_distance(x, y, p=2):
"""
Compute the L**p distance between two arrays.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance
>>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]])
array([ 1.41421356, 1. ])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf or p == 1:
return minkowski_distance_p(x, y, p)
else:
return minkowski_distance_p(x, y, p)**(1./p)
class Rectangle(object):
"""Hyperrectangle class.
Represents a Cartesian product of intervals.
"""
def __init__(self, maxes, mins):
"""Construct a hyperrectangle."""
self.maxes = np.maximum(maxes,mins).astype(float)
self.mins = np.minimum(maxes,mins).astype(float)
self.m, = self.maxes.shape
def __repr__(self):
return "<Rectangle %s>" % list(zip(self.mins, self.maxes))
def volume(self):
"""Total volume."""
return np.prod(self.maxes-self.mins)
def split(self, d, split):
"""
Produce two hyperrectangles by splitting.
In general, if you need to compute maximum and minimum
distances to the children, it can be done more efficiently
by updating the maximum and minimum distances to the parent.
Parameters
----------
d : int
Axis to split hyperrectangle along.
split : float
Position along axis `d` to split at.
"""
mid = np.copy(self.maxes)
mid[d] = split
less = Rectangle(self.mins, mid)
mid = np.copy(self.mins)
mid[d] = split
greater = Rectangle(mid, self.maxes)
return less, greater
def min_distance_point(self, x, p=2.):
"""
Return the minimum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-x,x-self.maxes)),p)
def max_distance_point(self, x, p=2.):
"""
Return the maximum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input array.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-x,x-self.mins),p)
def min_distance_rectangle(self, other, p=2.):
"""
Compute the minimum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-other.maxes,other.mins-self.maxes)),p)
def max_distance_rectangle(self, other, p=2.):
"""
Compute the maximum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-other.mins,other.maxes-self.mins),p)
class KDTree(object):
"""
kd-tree for quick nearest-neighbor lookup
This class provides an index into a set of k-dimensional points which
can be used to rapidly look up the nearest neighbors of any point.
Parameters
----------
data : (N,K) array_like
The data points to be indexed. This array is not copied, and
so modifying this data will result in bogus results.
leafsize : int, optional
The number of points at which the algorithm switches over to
brute-force. Has to be positive.
Raises
------
RuntimeError
The maximum recursion limit can be exceeded for large data
sets. If this happens, either increase the value for the `leafsize`
parameter or increase the recursion limit by::
>>> import sys
>>> sys.setrecursionlimit(10000)
See Also
--------
cKDTree : Implementation of `KDTree` in Cython
Notes
-----
The algorithm used is described in Maneewongvatana and Mount 1999.
The general idea is that the kd-tree is a binary tree, each of whose
nodes represents an axis-aligned hyperrectangle. Each node specifies
an axis and splits the set of points based on whether their coordinate
along that axis is greater than or less than a particular value.
During construction, the axis and splitting point are chosen by the
"sliding midpoint" rule, which ensures that the cells do not all
become long and thin.
The tree can be queried for the r closest neighbors of any given point
(optionally returning only those within some maximum distance of the
point). It can also be queried, with a substantial gain in efficiency,
for the r approximate closest neighbors.
For large dimensions (20 is already large) do not expect this to run
significantly faster than brute force. High-dimensional nearest-neighbor
queries are a substantial open problem in computer science.
The tree also supports all-neighbors queries, both with arrays of points
and with other kd-trees. These do use a reasonably efficient algorithm,
but the kd-tree is not necessarily the best data structure for this
sort of calculation.
"""
def __init__(self, data, leafsize=10):
self.data = np.asarray(data)
self.n, self.m = np.shape(self.data)
self.leafsize = int(leafsize)
if self.leafsize < 1:
raise ValueError("leafsize must be at least 1")
self.maxes = np.amax(self.data,axis=0)
self.mins = np.amin(self.data,axis=0)
self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)
class node(object):
if sys.version_info[0] >= 3:
def __lt__(self, other):
return id(self) < id(other)
def __gt__(self, other):
return id(self) > id(other)
def __le__(self, other):
return id(self) <= id(other)
def __ge__(self, other):
return id(self) >= id(other)
def __eq__(self, other):
return id(self) == id(other)
class leafnode(node):
def __init__(self, idx):
self.idx = idx
self.children = len(idx)
class innernode(node):
def __init__(self, split_dim, split, less, greater):
self.split_dim = split_dim
self.split = split
self.less = less
self.greater = greater
self.children = less.children+greater.children
def __build(self, idx, maxes, mins):
if len(idx) <= self.leafsize:
return KDTree.leafnode(idx)
else:
data = self.data[idx]
# maxes = np.amax(data,axis=0)
# mins = np.amin(data,axis=0)
d = np.argmax(maxes-mins)
maxval = maxes[d]
minval = mins[d]
if maxval == minval:
# all points are identical; warn user?
return KDTree.leafnode(idx)
data = data[:,d]
# sliding midpoint rule; see Maneewongvatana and Mount 1999
# for arguments that this is a good idea.
split = (maxval+minval)/2
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(less_idx) == 0:
split = np.amin(data)
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(greater_idx) == 0:
split = np.amax(data)
less_idx = np.nonzero(data < split)[0]
greater_idx = np.nonzero(data >= split)[0]
if len(less_idx) == 0:
# _still_ zero? all must have the same value
if not np.all(data == data[0]):
raise ValueError("Troublesome data array: %s" % data)
split = data[0]
less_idx = np.arange(len(data)-1)
greater_idx = np.array([len(data)-1])
lessmaxes = np.copy(maxes)
lessmaxes[d] = split
greatermins = np.copy(mins)
greatermins[d] = split
return KDTree.innernode(d, split,
self.__build(idx[less_idx],lessmaxes,mins),
self.__build(idx[greater_idx],maxes,greatermins))
def __query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
side_distances = np.maximum(0,np.maximum(x-self.maxes,self.mins-x))
if p != np.inf:
side_distances **= p
min_distance = np.sum(side_distances)
else:
min_distance = np.amax(side_distances)
# priority queue for chasing nodes
# entries are:
# minimum distance between the cell and the target
# distances between the nearest side of the cell and the target
# the head node of the cell
q = [(min_distance,
tuple(side_distances),
self.tree)]
# priority queue for the nearest neighbors
# furthest known neighbor first
# entries are (-distance**p, i)
neighbors = []
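        # epsfac rescales the pruning threshold so the (1 + eps) approximation
        # guarantee holds in the distance domain used below (raw distances for
        # p = inf, p-th powers otherwise)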
if eps == 0:
epsfac = 1
elif p == np.inf:
epsfac = 1/(1+eps)
else:
epsfac = 1/(1+eps)**p
if p != np.inf and distance_upper_bound != np.inf:
distance_upper_bound = distance_upper_bound**p
while q:
min_distance, side_distances, node = heappop(q)
if isinstance(node, KDTree.leafnode):
# brute-force
data = self.data[node.idx]
ds = minkowski_distance_p(data,x[np.newaxis,:],p)
for i in range(len(ds)):
if ds[i] < distance_upper_bound:
if len(neighbors) == k:
heappop(neighbors)
heappush(neighbors, (-ds[i], node.idx[i]))
if len(neighbors) == k:
distance_upper_bound = -neighbors[0][0]
else:
# we don't push cells that are too far onto the queue at all,
# but since the distance_upper_bound decreases, we might get
# here even if the cell's too far
if min_distance > distance_upper_bound*epsfac:
# since this is the nearest cell, we're done, bail out
break
# compute minimum distances to the children and push them on
if x[node.split_dim] < node.split:
near, far = node.less, node.greater
else:
near, far = node.greater, node.less
# near child is at the same distance as the current node
heappush(q,(min_distance, side_distances, near))
# far child is further by an amount depending only
# on the split value
sd = list(side_distances)
if p == np.inf:
min_distance = max(min_distance, abs(node.split-x[node.split_dim]))
elif p == 1:
sd[node.split_dim] = np.abs(node.split-x[node.split_dim])
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
else:
sd[node.split_dim] = np.abs(node.split-x[node.split_dim])**p
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
# far child might be too far, if so, don't bother pushing it
if min_distance <= distance_upper_bound*epsfac:
heappush(q,(min_distance, tuple(sd), far))
if p == np.inf:
return sorted([(-d,i) for (d,i) in neighbors])
else:
return sorted([((-d)**(1./p),i) for (d,i) in neighbors])
def query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
"""
Query the kd-tree for nearest neighbors
Parameters
----------
x : array_like, last dimension self.m
An array of points to query.
k : int, optional
The number of nearest neighbors to return.
eps : nonnegative float, optional
Return approximate nearest neighbors; the kth returned value
is guaranteed to be no further than (1+eps) times the
distance to the real kth nearest neighbor.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use.
1 is the sum-of-absolute-values "Manhattan" distance
2 is the usual Euclidean distance
infinity is the maximum-coordinate-difference distance
distance_upper_bound : nonnegative float, optional
Return only neighbors within this distance. This is used to prune
tree searches, so if you are doing a series of nearest-neighbor
queries, it may help to supply the distance to the nearest neighbor
of the most recent point.
Returns
-------
d : float or array of floats
The distances to the nearest neighbors.
If x has shape tuple+(self.m,), then d has shape tuple if
k is one, or tuple+(k,) if k is larger than one. Missing
neighbors (e.g. when k > n or distance_upper_bound is
given) are indicated with infinite distances. If k is None,
then d is an object array of shape tuple, containing lists
of distances. In either case the hits are sorted by distance
(nearest first).
i : integer or array of integers
The locations of the neighbors in self.data. i is the same
shape as d.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 2:8]
>>> tree = spatial.KDTree(list(zip(x.ravel(), y.ravel())))
>>> tree.data
array([[0, 2],
[0, 3],
[0, 4],
[0, 5],
[0, 6],
[0, 7],
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[1, 6],
[1, 7],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 6],
[2, 7],
[3, 2],
[3, 3],
[3, 4],
[3, 5],
[3, 6],
[3, 7],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6],
[4, 7]])
>>> pts = np.array([[0, 0], [2.1, 2.9]])
>>> tree.query(pts)
(array([ 2. , 0.14142136]), array([ 0, 13]))
>>> tree.query(pts[0])
(2.0, 0)
"""
x = np.asarray(x)
if np.shape(x)[-1] != self.m:
raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
if p < 1:
raise ValueError("Only p-norms with 1<=p<=infinity permitted")
retshape = np.shape(x)[:-1]
if retshape != ():
if k is None:
dd = np.empty(retshape,dtype=object)
ii = np.empty(retshape,dtype=object)
elif k > 1:
dd = np.empty(retshape+(k,),dtype=float)
dd.fill(np.inf)
ii = np.empty(retshape+(k,),dtype=int)
ii.fill(self.n)
elif k == 1:
dd = np.empty(retshape,dtype=float)
dd.fill(np.inf)
ii = np.empty(retshape,dtype=int)
ii.fill(self.n)
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
for c in np.ndindex(retshape):
hits = self.__query(x[c], k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
dd[c] = [d for (d,i) in hits]
ii[c] = [i for (d,i) in hits]
elif k > 1:
for j in range(len(hits)):
dd[c+(j,)], ii[c+(j,)] = hits[j]
elif k == 1:
if len(hits) > 0:
dd[c], ii[c] = hits[0]
else:
dd[c] = np.inf
ii[c] = self.n
return dd, ii
else:
hits = self.__query(x, k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
return [d for (d,i) in hits], [i for (d,i) in hits]
elif k == 1:
if len(hits) > 0:
return hits[0]
else:
return np.inf, self.n
elif k > 1:
dd = np.empty(k,dtype=float)
dd.fill(np.inf)
ii = np.empty(k,dtype=int)
ii.fill(self.n)
for j in range(len(hits)):
dd[j], ii[j] = hits[j]
return dd, ii
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
def __query_ball_point(self, x, r, p=2., eps=0):
R = Rectangle(self.maxes, self.mins)
def traverse_checking(node, rect):
if rect.min_distance_point(x, p) > r / (1. + eps):
return []
elif rect.max_distance_point(x, p) < r * (1. + eps):
return traverse_no_checking(node)
elif isinstance(node, KDTree.leafnode):
d = self.data[node.idx]
return node.idx[minkowski_distance(d, x, p) <= r].tolist()
else:
less, greater = rect.split(node.split_dim, node.split)
return traverse_checking(node.less, less) + \
traverse_checking(node.greater, greater)
def traverse_no_checking(node):
if isinstance(node, KDTree.leafnode):
return node.idx.tolist()
else:
return traverse_no_checking(node.less) + \
traverse_no_checking(node.greater)
return traverse_checking(self.tree, R)
def query_ball_point(self, x, r, p=2., eps=0):
"""Find all points within distance r of point(s) x.
Parameters
----------
x : array_like, shape tuple + (self.m,)
The point or points to search for neighbors of.
r : positive float
The radius of points to return.
p : float, optional
Which Minkowski p-norm to use. Should be in the range [1, inf].
eps : nonnegative float, optional
Approximate search. Branches of the tree are not explored if their
nearest points are further than ``r / (1 + eps)``, and branches are
added in bulk if their furthest points are nearer than
``r * (1 + eps)``.
Returns
-------
results : list or array of lists
If `x` is a single point, returns a list of the indices of the
neighbors of `x`. If `x` is an array of points, returns an object
array of shape tuple containing lists of neighbors.
Notes
-----
If you have many points whose neighbors you want to find, you may save
substantial amounts of time by putting them in a KDTree and using
query_ball_tree.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 0:5]
>>> points = zip(x.ravel(), y.ravel())
>>> tree = spatial.KDTree(points)
>>> tree.query_ball_point([2, 0], 1)
[5, 10, 11, 15]
Query multiple points and plot the results:
>>> import matplotlib.pyplot as plt
>>> points = np.asarray(points)
>>> plt.plot(points[:,0], points[:,1], '.')
>>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1):
... nearby_points = points[results]
... plt.plot(nearby_points[:,0], nearby_points[:,1], 'o')
>>> plt.margins(0.1, 0.1)
>>> plt.show()
"""
x = np.asarray(x)
if x.shape[-1] != self.m:
raise ValueError("Searching for a %d-dimensional point in a "
"%d-dimensional KDTree" % (x.shape[-1], self.m))
if len(x.shape) == 1:
return self.__query_ball_point(x, r, p, eps)
else:
retshape = x.shape[:-1]
result = np.empty(retshape, dtype=object)
for c in np.ndindex(retshape):
result[c] = self.__query_ball_point(x[c], r, p=p, eps=eps)
return result
def query_ball_tree(self, other, r, p=2., eps=0):
"""Find all pairs of points whose distance is at most r
Parameters
----------
other : KDTree instance
The tree containing points to search against.
r : float
The maximum distance, has to be positive.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : list of lists
For each element ``self.data[i]`` of this tree, ``results[i]`` is a
list of the indices of its neighbors in ``other.data``.
"""
results = [[] for i in range(self.n)]
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
d = other.data[node2.idx]
for i in node1.idx:
results[i] += node2.idx[minkowski_distance(d,self.data[i],p) <= r].tolist()
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1,rect1,node2.less,less)
traverse_checking(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less,less,node2,rect2)
traverse_checking(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less,less1,node2.less,less2)
traverse_checking(node1.less,less1,node2.greater,greater2)
traverse_checking(node1.greater,greater1,node2.less,less2)
traverse_checking(node1.greater,greater1,node2.greater,greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
results[i] += node2.idx.tolist()
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return results
def query_pairs(self, r, p=2., eps=0):
"""
Find all pairs of points within a distance.
Parameters
----------
r : positive float
The maximum distance.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : set
Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
positions are close.
"""
results = set()
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
else:
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1,rect1,node2.less,less)
traverse_checking(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less,less,node2,rect2)
traverse_checking(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less,less1,node2.less,less2)
traverse_checking(node1.less,less1,node2.greater,greater2)
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) != id(node2):
traverse_checking(node1.greater,greater1,node2.less,less2)
traverse_checking(node1.greater,greater1,node2.greater,greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
else:
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) == id(node2):
traverse_no_checking(node1.less, node2.less)
traverse_no_checking(node1.less, node2.greater)
traverse_no_checking(node1.greater, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
self.tree, Rectangle(self.maxes, self.mins))
return results
def count_neighbors(self, other, r, p=2.):
"""
Count how many nearby pairs can be formed.
Count the number of pairs (x1,x2) can be formed, with x1 drawn
from self and x2 drawn from `other`, and where
``distance(x1, x2, p) <= r``.
This is the "two-point correlation" described in Gray and Moore 2000,
"N-body problems in statistical learning", and the code here is based
on their algorithm.
Parameters
----------
other : KDTree instance
The other tree to draw points from.
r : float or one-dimensional array of floats
The radius to produce a count for. Multiple radii are searched with
a single tree traversal.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use
Returns
-------
result : int or 1-D array of ints
The number of pairs. Note that this is internally stored in a numpy
int, and so may overflow if very large (2e9).
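Examples
--------
A minimal sketch: two tiny trees with exactly two cross pairs closer
than ``r = 1``:

>>> from scipy.spatial import KDTree
>>> t1 = KDTree([[0, 0], [1, 0]])
>>> t2 = KDTree([[0, 0.5], [1, 0.5]])
>>> int(t1.count_neighbors(t2, 1.0))
2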
"""
def traverse(node1, rect1, node2, rect2, idx):
min_r = rect1.min_distance_rectangle(rect2,p)
max_r = rect1.max_distance_rectangle(rect2,p)
c_greater = r[idx] > max_r
result[idx[c_greater]] += node1.children*node2.children
idx = idx[(min_r <= r[idx]) & (r[idx] <= max_r)]
if len(idx) == 0:
return
if isinstance(node1,KDTree.leafnode):
if isinstance(node2,KDTree.leafnode):
ds = minkowski_distance(self.data[node1.idx][:,np.newaxis,:],
other.data[node2.idx][np.newaxis,:,:],
p).ravel()
ds.sort()
result[idx] += np.searchsorted(ds,r[idx],side='right')
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1, rect1, node2.less, less, idx)
traverse(node1, rect1, node2.greater, greater, idx)
else:
if isinstance(node2,KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less, less, node2, rect2, idx)
traverse(node1.greater, greater, node2, rect2, idx)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less,less1,node2.less,less2,idx)
traverse(node1.less,less1,node2.greater,greater2,idx)
traverse(node1.greater,greater1,node2.less,less2,idx)
traverse(node1.greater,greater1,node2.greater,greater2,idx)
R1 = Rectangle(self.maxes, self.mins)
R2 = Rectangle(other.maxes, other.mins)
if np.shape(r) == ():
r = np.array([r])
result = np.zeros(1,dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(1))
return result[0]
elif len(np.shape(r)) == 1:
r = np.asarray(r)
n, = r.shape
result = np.zeros(n,dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(n))
return result
else:
raise ValueError("r must be either a single value or a one-dimensional array of values")
def sparse_distance_matrix(self, other, max_distance, p=2.):
"""
Compute a sparse distance matrix
Computes a distance matrix between two KDTrees, leaving as zero
any distance greater than max_distance.
Parameters
----------
other : KDTree
max_distance : positive float
p : float, optional
Returns
-------
result : dok_matrix
Sparse matrix representing the results in "dictionary of keys" format.
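Examples
--------
A minimal sketch; only the two pairs within ``max_distance`` receive an
entry in the resulting "dictionary of keys" matrix:

>>> from scipy.spatial import KDTree
>>> t1 = KDTree([[0, 0], [5, 5]])
>>> t2 = KDTree([[0, 1], [5, 6]])
>>> d = t1.sparse_distance_matrix(t2, max_distance=2.0)
>>> d.nnz
2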
"""
result = scipy.sparse.dok_matrix((self.n,other.n))
def traverse(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > max_distance:
return
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
for j in node2.idx:
d = minkowski_distance(self.data[i],other.data[j],p)
if d <= max_distance:
result[i,j] = d
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1,rect1,node2.less,less)
traverse(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less,less,node2,rect2)
traverse(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less,less1,node2.less,less2)
traverse(node1.less,less1,node2.greater,greater2)
traverse(node1.greater,greater1,node2.less,less2)
traverse(node1.greater,greater1,node2.greater,greater2)
traverse(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return result
def distance_matrix(x, y, p=2, threshold=1000000):
"""
Compute the distance matrix.
Returns the matrix of all pair-wise distances.
Parameters
----------
x : (M, K) array_like
Matrix of M vectors in K dimensions.
y : (N, K) array_like
Matrix of N vectors in K dimensions.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
threshold : positive int
If ``M * N * K`` > `threshold`, algorithm uses a Python loop instead
of large temporary arrays.
Returns
-------
result : (M, N) ndarray
Matrix containing the distance from every vector in `x` to every vector
in `y`.
Examples
--------
>>> from scipy.spatial import distance_matrix
>>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
array([[ 1.        ,  1.41421356],
       [ 1.41421356,  1.        ]])
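Lowering `threshold` only switches to the memory-saving loop; the result
itself is unchanged (a quick check on the same inputs):

>>> import numpy as np
>>> a = distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
>>> b = distance_matrix([[0,0],[0,1]], [[1,0],[1,1]], threshold=1)
>>> bool(np.allclose(a, b))
True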
"""
x = np.asarray(x)
m, k = x.shape
y = np.asarray(y)
n, kk = y.shape
if k != kk:
raise ValueError("x contains %d-dimensional vectors but y contains %d-dimensional vectors" % (k, kk))
if m*n*k <= threshold:
return minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p)
else:
result = np.empty((m,n),dtype=float) # FIXME: figure out the best dtype
if m < n:
for i in range(m):
result[i,:] = minkowski_distance(x[i],y,p)
else:
for j in range(n):
result[:,j] = minkowski_distance(x,y[j],p)
return result
|
bsd-3-clause
|
yonglehou/scikit-learn
|
examples/plot_multilabel.py
|
236
|
4157
|
# Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
|
bsd-3-clause
|
elkingtonmcb/scikit-learn
|
sklearn/cluster/bicluster.py
|
211
|
19443
|
"""Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
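Examples
--------
A minimal sketch against this version of the estimator (``fit`` here
returns ``None``; the exact label assignment depends on the k-means
initialization):

>>> import numpy as np
>>> from sklearn.cluster import SpectralCoclustering
>>> X = np.array([[5, 5, 0, 0],
...               [4, 5, 0, 0],
...               [0, 0, 5, 4],
...               [0, 0, 4, 4]])
>>> model = SpectralCoclustering(n_clusters=2, random_state=0)
>>> model.fit(X)
>>> model.row_labels_.shape
(4,)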
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
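Examples
--------
A minimal sketch against this version of the estimator (``fit`` here
returns ``None``); ``method='log'`` is used because the toy matrix is
dense and strictly positive:

>>> import numpy as np
>>> from sklearn.cluster import SpectralBiclustering
>>> rng = np.random.RandomState(0)
>>> X = rng.rand(8, 6) + 1
>>> model = SpectralBiclustering(n_clusters=2, method='log', random_state=0)
>>> model.fit(X)
>>> model.row_labels_.shape
(8,)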
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
|
bsd-3-clause
|
quietcoolwu/myBuildingMachineLearningSystemsWithPython-second_edition
|
ch02/figure4_5_sklearn.py
|
22
|
2475
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
COLOUR_FIGURE = False
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from load import load_dataset
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
feature_names = [
'area',
'perimeter',
'compactness',
'length of kernel',
'width of kernel',
'asymmetry coefficient',
'length of kernel groove',
]
def plot_decision(features, labels, num_neighbors=1):
'''Plots decision boundary for KNN
Parameters
----------
features : ndarray
labels : sequence
Returns
-------
fig : Matplotlib Figure
ax : Matplotlib Axes
'''
y0, y1 = features[:, 2].min() * .9, features[:, 2].max() * 1.1
x0, x1 = features[:, 0].min() * .9, features[:, 0].max() * 1.1
X = np.linspace(x0, x1, 1000)
Y = np.linspace(y0, y1, 1000)
X, Y = np.meshgrid(X, Y)
model = KNeighborsClassifier(num_neighbors)
model.fit(features[:, (0,2)], labels)
C = model.predict(np.vstack([X.ravel(), Y.ravel()]).T).reshape(X.shape)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .7, .7), (.7, 1., .7), (.7, .7, 1.)])
else:
cmap = ListedColormap([(1., 1., 1.), (.2, .2, .2), (.6, .6, .6)])
fig,ax = plt.subplots()
ax.set_xlim(x0, x1)
ax.set_ylim(y0, y1)
ax.set_xlabel(feature_names[0])
ax.set_ylabel(feature_names[2])
ax.pcolormesh(X, Y, C, cmap=cmap)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .0, .0), (.1, .6, .1), (.0, .0, 1.)])
ax.scatter(features[:, 0], features[:, 2], c=labels, cmap=cmap)
else:
for lab, ma in zip(range(3), "Do^"):
ax.plot(features[labels == lab, 0], features[
labels == lab, 2], ma, c=(1., 1., 1.), ms=6)
return fig,ax
features, labels = load_dataset('seeds')
names = sorted(set(labels))
labels = np.array([names.index(ell) for ell in labels])
fig,ax = plot_decision(features, labels)
fig.tight_layout()
fig.savefig('figure4sklearn.png')
features -= features.mean(0)
features /= features.std(0)
fig,ax = plot_decision(features, labels)
fig.tight_layout()
fig.savefig('figure5sklearn.png')
fig,ax = plot_decision(features, labels, 11)
fig.tight_layout()
fig.savefig('figure5sklearn_with_11_neighbors.png')
|
mit
|
jaidevd/scikit-learn
|
examples/linear_model/plot_sgd_separating_hyperplane.py
|
84
|
1221
|
"""
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
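# The loop above evaluates the decision function one grid point at a time
# for clarity; an equivalent vectorized alternative (not what the original
# example does) would be:
# Z = clf.decision_function(np.c_[X1.ravel(), X2.ravel()]).reshape(X1.shape)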
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
smarden1/airflow
|
airflow/hooks/base_hook.py
|
5
|
1379
|
import logging
import random
from airflow import settings
from airflow.models import Connection
from airflow.utils import AirflowException
class BaseHook(object):
"""
Abstract base class for hooks, hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook, PigHook return
object that can handle the connection and interaction to specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
def get_connections(self, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
session.expunge_all()
session.close()
return db
def get_connection(self, conn_id):
conn = random.choice(self.get_connections(conn_id))
if conn.host:
logging.info("Using connection to: " + conn.host)
return conn
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
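# A minimal sketch of how a concrete hook would build on this interface.
# The class name and the connection id below are hypothetical examples,
# not part of Airflow itself.
class EchoHook(BaseHook):
    """Toy hook that returns a randomly chosen matching Connection."""
    def get_conn(self):
        # 'my_echo_conn' is assumed to exist in the Airflow connections table
        return self.get_connection('my_echo_conn')
# Usage sketch: hook = EchoHook(source=None); conn = hook.get_conn()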
|
apache-2.0
|
savkov/bioeval
|
tests/test_bioeval.py
|
1
|
10859
|
import os
import sys
import math
import warnings
import traceback
import pandas as pd
from unittest import TestCase
from bioeval import evaluate, evaluate_df, get_ncor
from bioeval.utils import *
from iterpipes3 import check_call, cmd
__author__ = 'Aleksandar Savkov'
class TestBIOEval(TestCase):
def test_equality(self):
gold = {
((1, 'Gold', 'N', 'B-NP'),),
((2, 'is', 'V', 'B-MV'),),
((3, 'green', 'J', 'B-AP'),),
((4, '.', '.', 'O'),),
(
(5, 'The', 'D', 'B-NP'),
(6, 'red', 'J', 'I-NP'),
(7, 'square', 'N', 'I-NP')
),
((8, 'is', 'V', 'B-MV'),),
(
(9, 'very', 'A', 'B-AP'),
(10, 'boring', 'J', 'I-AP')
),
((8, '.', '.', 'O'),)
}
guess = {
((1, 'Gold', 'N', 'B-NP'),),
((2, 'is', 'V', 'B-MV'),),
((3, 'green', 'J', 'B-AP'),),
((4, '.', '.', 'O'),),
(
(5, 'The', 'D', 'B-NP'),
(6, 'red', 'J', 'I-NP'),
(7, 'square', 'N', 'I-NP')
),
((8, 'is', 'V', 'B-MV'),),
(
(9, 'very', 'A', 'B-AP'),
(10, 'boring', 'J', 'I-AP')
),
((8, '.', '.', 'O'),)
}
f1, pr, re = evaluate(gold, guess)
self.assertEqual(f1, 100)
def test_one_diff(self):
gold = {
((1, 'Gold', 'N', 'B-NP'),),
((2, 'is', 'V', 'B-MV'),),
((3, 'green', 'J', 'B-AP'),),
((4, '.', '.', 'O'),),
(
(5, 'The', 'D', 'B-NP'),
(6, 'red', 'J', 'I-NP'),
(7, 'square', 'N', 'I-NP')
),
((8, 'is', 'V', 'B-MV'),),
(
(9, 'very', 'A', 'B-AP'),
(10, 'boring', 'J', 'I-AP')
),
((8, '.', '.', 'O'),)
}
guess = {
((1, 'Gold', 'N', 'B-NP'),),
((2, 'is', 'V', 'B-MV'),),
((3, 'green', 'J', 'B-AP'),),
((4, '.', '.', 'O'),),
(
(5, 'The', 'D', 'B-NP'),
(6, 'red', 'J', 'I-NP'),
(7, 'square', 'N', 'I-NP')
),
((8, 'is', 'V', 'B-MV'),),
(
(9, 'very', 'A', 'B-AP'),
(10, 'boring', 'J', 'I-AP')
),
((8, '.', '.', '.'),)
}
f1, pr, re = evaluate(gold, guess, do_round=False)
self.assertAlmostEqual(f1, fscore(6.0/7, 6.0/6))
def test_one_miss(self):
gold = {
((1, 'Gold', 'N', 'B-NP'),),
((2, 'is', 'V', 'B-MV'),),
((3, 'green', 'J', 'B-AP'),),
((4, '.', '.', 'O'),),
(
(5, 'The', 'D', 'B-NP'),
(6, 'red', 'J', 'I-NP'),
(7, 'square', 'N', 'I-NP')
),
((8, 'is', 'V', 'B-MV'),),
(
(9, 'very', 'A', 'B-AP'),
(10, 'boring', 'J', 'I-AP')
),
((8, '.', '.', 'O'),)
}
guess = {
((1, 'Gold', 'N', 'B-NP'),),
((2, 'is', 'V', 'B-MV'),),
((3, 'green', 'J', 'B-AP'),),
((4, '.', '.', 'O'),),
(
(5, 'The', 'D', 'B-NP'),
(6, 'red', 'J', 'I-NP'),
(7, 'square', 'N', 'I-NP')
),
((8, 'is', 'V', 'B-MV'),),
(
(9, 'very', 'A', 'B-AP'),
(10, 'boring', 'J', 'I-AP')
)
}
with self.assertRaises(AssertionError):
evaluate(gold, guess)
def test_one_diff_each(self):
gold = {
((1, 'Gold', 'N', 'B-NP'),),
((2, 'is', 'V', 'B-MV'),),
((3, 'green', 'J', 'B-AP'),),
((4, '.', '.', 'B-NP'),),
(
(5, 'The', 'D', 'B-NP'),
(6, 'red', 'J', 'I-NP'),
(7, 'square', 'N', 'I-NP')
),
((8, 'is', 'V', 'B-MV'),),
(
(9, 'very', 'A', 'B-AP'),
(10, 'boring', 'J', 'I-AP')
),
((8, '.', '.', 'O'),)
}
guess = {
((1, 'Gold', 'N', 'B-NP'),),
((2, 'is', 'V', 'B-MV'),),
((3, 'green', 'J', 'B-AP'),),
((4, '.', '.', 'O'),),
(
(5, 'The', 'D', 'B-NP'),
(6, 'red', 'J', 'I-NP'),
(7, 'square', 'N', 'I-NP')
),
((8, 'is', 'V', 'B-MV'),),
(
(9, 'very', 'A', 'B-AP'),
(10, 'boring', 'J', 'I-AP')
),
((8, '.', '.', 'B-NP'),)
}
f1, pr, re = evaluate(gold, guess)
f1_exact, _, _ = evaluate(gold, guess, do_round=False)
self.assertEqual(f1, round(fscore(6.0/7, 6.0/7), 2))
self.assertAlmostEqual(f1_exact, fscore(6.0/7, 6.0/7))
def test_ncor(self):
# change that to 1000+ if you want real testing
rep = 10
for i in range(rep):
n = 10000
ncor = math.floor(n * np.random.uniform(0.1, 1.0))
gold, guess = mock_chunks(n=n, ncor=np.int(ncor))
if len(gold) != len(guess):
print(ncor, len(gold), len(guess))
continue
nc = len(get_ncor(gold, guess))
self.assertAlmostEqual(nc, ncor, msg=(i,
len(gold),
len(guess),
nc,
ncor))
def test_df(self):
df = pd.DataFrame(
[
{'form': 'foo', 'pos': 'bar', 'chunktag': 'B-foo',
'guesstag': 'B-foo'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'I-foo',
'guesstag': 'I-foo'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'O',
'guesstag': 'O'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'B-bar',
'guesstag': 'B-bar'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'B-foo',
'guesstag': 'B-foo'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'O',
'guesstag': 'O'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'B-foo',
'guesstag': 'B-foo'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'I-foo',
'guesstag': 'I-foo'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'B-bar',
'guesstag': 'B-bar'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'I-bar',
'guesstag': 'I-bar'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'O',
'guesstag': 'O'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'B-foo',
'guesstag': 'B-foo'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'B-bar',
'guesstag': 'I-foo'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'B-foo',
'guesstag': 'B-foo'},
{'form': 'foo', 'pos': 'bar', 'chunktag': 'I-foo',
'guesstag': 'B-foo'}
]
)
f1, pr, re = evaluate_df(df, do_round=True)
real_f1 = round(fscore(5/8, 5/8), 2)
print(f1, real_f1)
self.assertEqual(f1, real_f1)
class TestBIOEvalSpecial(TestCase):
# make sure it runs from project root directory
@staticmethod
def _conll_eval(fp):
"""Evaluates a conll-style data file using the conll-2000 perl script.
:param fp: file path
:return: f1-score, precision, recall
"""
cwd = '.'
try:
os.mkdir('tmp')
except OSError:
pass
fpres = 'tmp/results.%s' % random_str()
fh_out = open(fpres, 'w')
if '\t' in open(fp, 'r').readline():
warnings.warn('Wrong tab column separator. Use tabs (\\t).')
try:
check_call(cmd('perl prl/conll_eval.pl -l < {}', fp, cwd=cwd,
stdout=fh_out))
except Exception:
warnings.warn("Exception ocurred during Evaluation.")
exc_type, exc_value, exc_traceback = sys.exc_info()
print("*** print_tb:")
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
print("*** print_exception:")
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
res = AccuracyResults()
res.parse_conll_eval_table(fpres)
os.remove(fpres)
return res['Total']['fscore'], res['Total']['precision'], \
res['Total']['recall']
@staticmethod
def _ssv2set(ssv):
"""Converts a SSVList with chunk tags and guess tags into two sets of
chunk tuples -- gold and guess.
:param ssv: data
:return: :raise ValueError:
"""
go, ge = set(), set()
if ssv[0].chunktag[0] not in 'BOS':
raise ValueError('Invalid chunktag on first token.')
if ssv[0].guesstag[0] not in 'BOS':
raise ValueError('Invalid guesstag on first token.')
chunk_go = [(0, ssv[0].form, ssv[0].postag, ssv[0].chunktag)]
chunk_ge = [(0, ssv[0].form, ssv[0].postag, ssv[0].guesstag)]
for tid, r in enumerate(ssv[1:], start=1):
if r.chunktag[0] in 'BOS':
# start new
go.add(tuple(chunk_go))
chunk_go = [(tid, r.form, r.postag, r.chunktag)]
else:
# continue chunk
chunk_go.append((tid, r.form, r.postag, r.chunktag))
if r.guesstag[0] in 'BOS':
# start new
ge.add(tuple(chunk_ge))
chunk_ge = [(tid, r.form, r.postag, r.guesstag)]
else:
# continue chunk
chunk_ge.append((tid, r.form, r.postag, r.guesstag))
if chunk_ge:
ge.add(tuple(chunk_ge))
if chunk_go:
go.add(tuple(chunk_go))
return go, ge
def test_against_conll2(self):
fp = 'res/conll_sample.data'
cols = ['form', 'pos', 'chunktag', 'guesstag']
df = pd.read_csv(fp, sep=' ', names=cols)
f1_conll, pre_conll, rec_conll = self._conll_eval(fp)
f1, pre, rec = evaluate_df(df, do_round=True)
self.assertEqual(f1, f1_conll)
self.assertEqual(pre, pre_conll)
self.assertEqual(rec, rec_conll)
|
mit
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/backends/backend_template.py
|
2
|
8926
|
"""
This is a fully functional do nothing backend to provide a template to
backend writers. It is fully functional in that you can select it as
a backend with
import matplotlib
matplotlib.use('Template')
and your matplotlib scripts will (should!) run without error, though
no output is produced. This provides a nice starting point for
backend writers because you can selectively implement methods
(draw_rectangle, draw_lines, etc...) and slowly see your figure come
to life w/o having to have a full blown implementation before getting
any results.
Copy this to backend_xxx.py and replace all instances of 'template'
with 'xxx'. Then implement the class methods and functions below, and
add 'xxx' to the switchyard in matplotlib/backends/__init__.py and
'xxx' to the backends list in the validate_backend method in
matplotlib/__init__.py and you're off. You can use your backend with::
import matplotlib
matplotlib.use('xxx')
from pylab import *
plot([1,2,3])
show()
matplotlib also supports external backends, so you can use any module
in your PYTHONPATH with the syntax::
import matplotlib
matplotlib.use('module://my_backend')
where my_backend.py is your module name. This syntax is also
recognized in the rc file and in the -d argument in pylab, eg::
python simple_plot.py -dmodule://my_backend
The files that are most relevant to backend writers are
matplotlib/backends/backend_your_backend.py
matplotlib/backend_bases.py
matplotlib/backends/__init__.py
matplotlib/__init__.py
matplotlib/_pylab_helpers.py
Naming Conventions
* classes Upper or MixedUpperCase
* variables lower or lowerUpper
* functions lower or underscore_separated
"""
from __future__ import division, print_function
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
class RendererTemplate(RendererBase):
"""
The renderer handles drawing/rendering operations.
This is a minimal do-nothing class that can be used to get started when
writing a new backend. Refer to backend_bases.RendererBase for
documentation of the classes methods.
"""
def __init__(self, dpi):
self.dpi = dpi
def draw_path(self, gc, path, transform, rgbFace=None):
pass
# draw_markers is optional, and we get more correct relative
# timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
# pass
# draw_path_collection is optional, and we get more correct
# relative timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_path_collection(self, gc, master_transform, paths,
# all_transforms, offsets, offsetTrans, facecolors,
# edgecolors, linewidths, linestyles,
# antialiaseds):
# pass
# draw_quad_mesh is optional, and we get more correct
# relative timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
# coordinates, offsets, offsetTrans, facecolors,
# antialiased, edgecolors):
# pass
def draw_image(self, gc, x, y, im):
pass
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
pass
def flipy(self):
return True
def get_canvas_width_height(self):
return 100, 100
def get_text_width_height_descent(self, s, prop, ismath):
return 1, 1, 1
def new_gc(self):
return GraphicsContextTemplate()
def points_to_pixels(self, points):
# if backend doesn't have dpi, eg, postscript or svg
return points
# elif backend assumes a value for pixels_per_inch
#return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
# else
#return points/72.0 * self.dpi.get()
class GraphicsContextTemplate(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc... See the gtk
and postscript backends for examples of mapping the graphics context
attributes (cap styles, join styles, line widths, colors) to a particular
backend. In GTK this is done by wrapping a gtk.gdk.GC object and
forwarding the appropriate calls to it using a dictionary mapping styles
to gdk constants. In Postscript, all the work is done by the renderer,
mapping line styles to postscript calls.
If it's more appropriate to do the mapping at the renderer level (as in
the postscript backend), you don't need to override any of the GC methods.
If it's more appropriate to wrap an instance (as in the GTK backend) and
do the mapping here, you'll need to override several of the setter
methods.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, eg, (0.5, 0.0, 1.0). You may need to map this to colors
appropriate for your backend.
"""
pass
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For image backends - is not required
For GUI backends - this should be overriden if drawing should be done in
interactive python mode
"""
pass
def show():
"""
For image backends - is not required
For GUI backends - show() is usually the last line of a pylab script and
tells the backend that it is time to draw. In interactive mode, this may
be a do nothing func. See the GTK backend for an example of how to handle
interactive versus batch mode
"""
for manager in Gcf.get_all_fig_managers():
# do something to display the GUI
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this (and
# new_figure_manager_given_figure) is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
# main-level app (e.g., backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasTemplate(figure)
manager = FigureManagerTemplate(canvas, num)
return manager
class FigureCanvasTemplate(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Note GUI templates will want to connect events for button presses,
mouse movements and key presses to functions that call the base
class methods button_press_event, button_release_event,
motion_notify_event, key_press_event, and key_release_event. See,
eg backend_gtk.py, backend_wx.py and backend_tkagg.py
"""
def draw(self):
"""
Draw the figure using the renderer
"""
renderer = RendererTemplate(self.figure.dpi)
self.figure.draw(renderer)
# You should provide a print_xxx function for every file format
# you can write.
# If the file type is not in the base set of filetypes,
# you should add it to the class-scope filetypes dictionary as follows:
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['foo'] = 'My magic Foo format'
def print_foo(self, filename, *args, **kwargs):
"""
Write out format foo. The dpi, facecolor and edgecolor are restored
to their original values after this call, so you don't need to
save and restore them.
"""
pass
def get_default_filetype(self):
return 'foo'
class FigureManagerTemplate(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
For non interactive backends, the base class does all the work
"""
pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerTemplate
|
mit
|
googleinterns/sgonks
|
project/services/data_updater/scripts/unit_tests.py
|
1
|
2147
|
#!/usr/bin/env python3
import unittest
import pandas as pd
from datetime import datetime
from fetch_trends import aggregate_hourly_to_daily
from dates import get_end_times, get_start_times
class TestFetch(unittest.TestCase):
def setUp(self):
data = {"test" : [1] * 24}
dates = [datetime.now()] * 24
self.hourly_df = pd.DataFrame(data, index=dates)
longer_data = {"test" : list(range(48))}
longer_dates = [datetime.now()] * 48
self.longer_hourly_df = pd.DataFrame(longer_data, index=longer_dates)
def test_daily_aggregate_all_ones(self):
daily_result = aggregate_hourly_to_daily(self.hourly_df).to_string(index=False)
expected_result = pd.DataFrame({"test" : [24]}).to_string(index=False)
self.assertEqual(daily_result, expected_result, "Incorrect aggregate of hourly data over 1 day")
def test_more_complicated_sum(self):
longer_daily_result = aggregate_hourly_to_daily(self.longer_hourly_df).to_string(index=False)
expected_result = pd.DataFrame({"test" : [sum(range(0,24)), sum(range(24,48))]}).to_string(index=False)
self.assertEqual(longer_daily_result, expected_result, "Incorrect aggregate of hourly data over 2 days")
class TestDates(unittest.TestCase):
def setUp(self):
self.start = get_start_times(0)
self.end = get_end_times()
def test_dates_return_types(self):
self.assertIsInstance(self.start, tuple, "Must return tuple")
self.assertIsInstance(self.end, tuple, "Must return tuple")
def test_dates_return_contents(self):
for val in self.start:
self.assertIsInstance(val, int, "Tuple contents must be ints")
for val in self.end:
self.assertIsInstance(val, int, "Tuple contents must be ints")
def test_dates_return_length(self):
self.assertEqual(len(self.start), 3, "Must return 3 integers")
self.assertEqual(len(self.end), 3, "Must return 3 integers")
def test_epoch_to_date(self):
self.assertEqual(self.start, (1970, 1, 1), "Should be epoch date")
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
dimroc/tensorflow-mnist-tutorial
|
lib/python3.6/site-packages/matplotlib/backends/qt_editor/formlayout.py
|
10
|
19681
|
# -*- coding: utf-8 -*-
"""
formlayout
==========
Module creating Qt form dialogs/layouts to edit various type of parameters
formlayout License Agreement (MIT License)
------------------------------------------
Copyright (c) 2009 Pierre Raybaut
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# History:
# 1.0.10: added float validator (disable "Ok" and "Apply" button when not valid)
# 1.0.7: added support for "Apply" button
# 1.0.6: code cleaning
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__version__ = '1.0.10'
__license__ = __doc__
DEBUG = False
import copy
import datetime
import warnings
import six
from matplotlib import colors as mcolors
from matplotlib.backends.qt_compat import QtGui, QtWidgets, QtCore
BLACKLIST = set(["title", "label"])
class ColorButton(QtWidgets.QPushButton):
"""
Color choosing push button
"""
colorChanged = QtCore.Signal(QtGui.QColor)
def __init__(self, parent=None):
QtWidgets.QPushButton.__init__(self, parent)
self.setFixedSize(20, 20)
self.setIconSize(QtCore.QSize(12, 12))
self.clicked.connect(self.choose_color)
self._color = QtGui.QColor()
def choose_color(self):
color = QtWidgets.QColorDialog.getColor(
self._color, self.parentWidget(), "",
QtWidgets.QColorDialog.ShowAlphaChannel)
if color.isValid():
self.set_color(color)
def get_color(self):
return self._color
@QtCore.Slot(QtGui.QColor)
def set_color(self, color):
if color != self._color:
self._color = color
self.colorChanged.emit(self._color)
pixmap = QtGui.QPixmap(self.iconSize())
pixmap.fill(color)
self.setIcon(QtGui.QIcon(pixmap))
color = QtCore.Property(QtGui.QColor, get_color, set_color)
def to_qcolor(color):
"""Create a QColor from a matplotlib color"""
qcolor = QtGui.QColor()
try:
rgba = mcolors.to_rgba(color)
except ValueError:
warnings.warn('Ignoring invalid color %r' % color)
return qcolor # return invalid QColor
qcolor.setRgbF(*rgba)
return qcolor
class ColorLayout(QtWidgets.QHBoxLayout):
"""Color-specialized QLineEdit layout"""
def __init__(self, color, parent=None):
QtWidgets.QHBoxLayout.__init__(self)
assert isinstance(color, QtGui.QColor)
self.lineedit = QtWidgets.QLineEdit(
mcolors.to_hex(color.getRgbF(), keep_alpha=True), parent)
self.lineedit.editingFinished.connect(self.update_color)
self.addWidget(self.lineedit)
self.colorbtn = ColorButton(parent)
self.colorbtn.color = color
self.colorbtn.colorChanged.connect(self.update_text)
self.addWidget(self.colorbtn)
def update_color(self):
color = self.text()
qcolor = to_qcolor(color)
self.colorbtn.color = qcolor # defaults to black if not qcolor.isValid()
def update_text(self, color):
self.lineedit.setText(mcolors.to_hex(color.getRgbF(), keep_alpha=True))
def text(self):
return self.lineedit.text()
def font_is_installed(font):
"""Check if font is installed"""
return [fam for fam in QtGui.QFontDatabase().families()
if six.text_type(fam) == font]
def tuple_to_qfont(tup):
"""
Create a QFont from tuple:
(family [string], size [int], italic [bool], bold [bool])
"""
if not (isinstance(tup, tuple) and len(tup) == 4
and font_is_installed(tup[0])
and isinstance(tup[1], int)
and isinstance(tup[2], bool)
and isinstance(tup[3], bool)):
return None
font = QtGui.QFont()
family, size, italic, bold = tup
font.setFamily(family)
font.setPointSize(size)
font.setItalic(italic)
font.setBold(bold)
return font
def qfont_to_tuple(font):
return (six.text_type(font.family()), int(font.pointSize()),
font.italic(), font.bold())
class FontLayout(QtWidgets.QGridLayout):
"""Font selection"""
def __init__(self, value, parent=None):
QtWidgets.QGridLayout.__init__(self)
font = tuple_to_qfont(value)
assert font is not None
# Font family
self.family = QtWidgets.QFontComboBox(parent)
self.family.setCurrentFont(font)
self.addWidget(self.family, 0, 0, 1, -1)
# Font size
self.size = QtWidgets.QComboBox(parent)
self.size.setEditable(True)
sizelist = list(range(6, 12)) + list(range(12, 30, 2)) + [36, 48, 72]
size = font.pointSize()
if size not in sizelist:
sizelist.append(size)
sizelist.sort()
self.size.addItems([str(s) for s in sizelist])
self.size.setCurrentIndex(sizelist.index(size))
self.addWidget(self.size, 1, 0)
# Italic or not
self.italic = QtWidgets.QCheckBox(self.tr("Italic"), parent)
self.italic.setChecked(font.italic())
self.addWidget(self.italic, 1, 1)
# Bold or not
self.bold = QtWidgets.QCheckBox(self.tr("Bold"), parent)
self.bold.setChecked(font.bold())
self.addWidget(self.bold, 1, 2)
def get_font(self):
font = self.family.currentFont()
font.setItalic(self.italic.isChecked())
font.setBold(self.bold.isChecked())
font.setPointSize(int(self.size.currentText()))
return qfont_to_tuple(font)
def is_edit_valid(edit):
text = edit.text()
state = edit.validator().validate(text, 0)[0]
return state == QtGui.QDoubleValidator.Acceptable
class FormWidget(QtWidgets.QWidget):
update_buttons = QtCore.Signal()
def __init__(self, data, comment="", parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.data = copy.deepcopy(data)
self.widgets = []
self.formlayout = QtWidgets.QFormLayout(self)
if comment:
self.formlayout.addRow(QtWidgets.QLabel(comment))
self.formlayout.addRow(QtWidgets.QLabel(" "))
if DEBUG:
print("\n"+("*"*80))
print("DATA:", self.data)
print("*"*80)
print("COMMENT:", comment)
print("*"*80)
def get_dialog(self):
"""Return FormDialog instance"""
dialog = self.parent()
while not isinstance(dialog, QtWidgets.QDialog):
dialog = dialog.parent()
return dialog
def setup(self):
for label, value in self.data:
if DEBUG:
print("value:", value)
if label is None and value is None:
# Separator: (None, None)
self.formlayout.addRow(QtWidgets.QLabel(" "), QtWidgets.QLabel(" "))
self.widgets.append(None)
continue
elif label is None:
# Comment
self.formlayout.addRow(QtWidgets.QLabel(value))
self.widgets.append(None)
continue
elif tuple_to_qfont(value) is not None:
field = FontLayout(value, self)
elif (label.lower() not in BLACKLIST
and mcolors.is_color_like(value)):
field = ColorLayout(to_qcolor(value), self)
elif isinstance(value, six.string_types):
field = QtWidgets.QLineEdit(value, self)
elif isinstance(value, (list, tuple)):
if isinstance(value, tuple):
value = list(value)
selindex = value.pop(0)
field = QtWidgets.QComboBox(self)
if isinstance(value[0], (list, tuple)):
keys = [key for key, _val in value]
value = [val for _key, val in value]
else:
keys = value
field.addItems(value)
if selindex in value:
selindex = value.index(selindex)
elif selindex in keys:
selindex = keys.index(selindex)
elif not isinstance(selindex, int):
warnings.warn(
"index '%s' is invalid (label: %s, value: %s)" %
(selindex, label, value))
selindex = 0
field.setCurrentIndex(selindex)
elif isinstance(value, bool):
field = QtWidgets.QCheckBox(self)
if value:
field.setCheckState(QtCore.Qt.Checked)
else:
field.setCheckState(QtCore.Qt.Unchecked)
elif isinstance(value, float):
field = QtWidgets.QLineEdit(repr(value), self)
field.setCursorPosition(0)
field.setValidator(QtGui.QDoubleValidator(field))
field.validator().setLocale(QtCore.QLocale("C"))
dialog = self.get_dialog()
dialog.register_float_field(field)
field.textChanged.connect(lambda text: dialog.update_buttons())
elif isinstance(value, int):
field = QtWidgets.QSpinBox(self)
field.setRange(-1e9, 1e9)
field.setValue(value)
elif isinstance(value, datetime.datetime):
field = QtWidgets.QDateTimeEdit(self)
field.setDateTime(value)
elif isinstance(value, datetime.date):
field = QtWidgets.QDateEdit(self)
field.setDate(value)
else:
field = QtWidgets.QLineEdit(repr(value), self)
self.formlayout.addRow(label, field)
self.widgets.append(field)
def get(self):
valuelist = []
for index, (label, value) in enumerate(self.data):
field = self.widgets[index]
if label is None:
# Separator / Comment
continue
elif tuple_to_qfont(value) is not None:
value = field.get_font()
elif (isinstance(value, six.string_types)
or mcolors.is_color_like(value)):
value = six.text_type(field.text())
elif isinstance(value, (list, tuple)):
index = int(field.currentIndex())
if isinstance(value[0], (list, tuple)):
value = value[index][0]
else:
value = value[index]
elif isinstance(value, bool):
value = field.checkState() == QtCore.Qt.Checked
elif isinstance(value, float):
value = float(str(field.text()))
elif isinstance(value, int):
value = int(field.value())
elif isinstance(value, datetime.datetime):
value = field.dateTime().toPyDateTime()
elif isinstance(value, datetime.date):
value = field.date().toPyDate()
else:
value = eval(str(field.text()))
valuelist.append(value)
return valuelist
class FormComboWidget(QtWidgets.QWidget):
update_buttons = QtCore.Signal()
def __init__(self, datalist, comment="", parent=None):
QtWidgets.QWidget.__init__(self, parent)
layout = QtWidgets.QVBoxLayout()
self.setLayout(layout)
self.combobox = QtWidgets.QComboBox()
layout.addWidget(self.combobox)
self.stackwidget = QtWidgets.QStackedWidget(self)
layout.addWidget(self.stackwidget)
self.combobox.currentIndexChanged.connect(self.stackwidget.setCurrentIndex)
self.widgetlist = []
for data, title, comment in datalist:
self.combobox.addItem(title)
widget = FormWidget(data, comment=comment, parent=self)
self.stackwidget.addWidget(widget)
self.widgetlist.append(widget)
def setup(self):
for widget in self.widgetlist:
widget.setup()
def get(self):
return [widget.get() for widget in self.widgetlist]
class FormTabWidget(QtWidgets.QWidget):
update_buttons = QtCore.Signal()
def __init__(self, datalist, comment="", parent=None):
QtWidgets.QWidget.__init__(self, parent)
layout = QtWidgets.QVBoxLayout()
self.tabwidget = QtWidgets.QTabWidget()
layout.addWidget(self.tabwidget)
self.setLayout(layout)
self.widgetlist = []
for data, title, comment in datalist:
if len(data[0]) == 3:
widget = FormComboWidget(data, comment=comment, parent=self)
else:
widget = FormWidget(data, comment=comment, parent=self)
index = self.tabwidget.addTab(widget, title)
self.tabwidget.setTabToolTip(index, comment)
self.widgetlist.append(widget)
def setup(self):
for widget in self.widgetlist:
widget.setup()
def get(self):
return [widget.get() for widget in self.widgetlist]
class FormDialog(QtWidgets.QDialog):
"""Form Dialog"""
def __init__(self, data, title="", comment="",
icon=None, parent=None, apply=None):
QtWidgets.QDialog.__init__(self, parent)
self.apply_callback = apply
# Form
if isinstance(data[0][0], (list, tuple)):
self.formwidget = FormTabWidget(data, comment=comment,
parent=self)
elif len(data[0]) == 3:
self.formwidget = FormComboWidget(data, comment=comment,
parent=self)
else:
self.formwidget = FormWidget(data, comment=comment,
parent=self)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.formwidget)
self.float_fields = []
self.formwidget.setup()
# Button box
self.bbox = bbox = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
self.formwidget.update_buttons.connect(self.update_buttons)
if self.apply_callback is not None:
apply_btn = bbox.addButton(QtWidgets.QDialogButtonBox.Apply)
apply_btn.clicked.connect(self.apply)
bbox.accepted.connect(self.accept)
bbox.rejected.connect(self.reject)
layout.addWidget(bbox)
self.setLayout(layout)
self.setWindowTitle(title)
if not isinstance(icon, QtGui.QIcon):
icon = QtWidgets.QWidget().style().standardIcon(QtWidgets.QStyle.SP_MessageBoxQuestion)
self.setWindowIcon(icon)
def register_float_field(self, field):
self.float_fields.append(field)
def update_buttons(self):
valid = True
for field in self.float_fields:
if not is_edit_valid(field):
valid = False
for btn_type in (QtWidgets.QDialogButtonBox.Ok,
QtWidgets.QDialogButtonBox.Apply):
btn = self.bbox.button(btn_type)
if btn is not None:
btn.setEnabled(valid)
def accept(self):
self.data = self.formwidget.get()
QtWidgets.QDialog.accept(self)
def reject(self):
self.data = None
QtWidgets.QDialog.reject(self)
def apply(self):
self.apply_callback(self.formwidget.get())
def get(self):
"""Return form result"""
return self.data
def fedit(data, title="", comment="", icon=None, parent=None, apply=None):
"""
Create form dialog and return result
(if Cancel button is pressed, return None)
data: datalist, datagroup
title: string
comment: string
icon: QIcon instance
parent: parent QWidget
apply: apply callback (function)
datalist: list/tuple of (field_name, field_value)
datagroup: list/tuple of (datalist *or* datagroup, title, comment)
-> one field for each member of a datalist
-> one tab for each member of a top-level datagroup
-> one page (of a multipage widget, each page can be selected with a combo
box) for each member of a datagroup inside a datagroup
Supported types for field_value:
- int, float, str, unicode, bool
- colors: in Qt-compatible text form, i.e. in hex format or name (red,...)
(automatically detected from a string)
- list/tuple:
* the first element will be the selected index (or value)
* the other elements can be couples (key, value) or only values
"""
# Create a QApplication instance if no instance currently exists
# (e.g., if the module is used directly from the interpreter)
if QtWidgets.QApplication.startingUp():
_app = QtWidgets.QApplication([])
dialog = FormDialog(data, title, comment, icon, parent, apply)
if dialog.exec_():
return dialog.get()
if __name__ == "__main__":
def create_datalist_example():
return [('str', 'this is a string'),
('list', [0, '1', '3', '4']),
('list2', ['--', ('none', 'None'), ('--', 'Dashed'),
('-.', 'DashDot'), ('-', 'Solid'),
('steps', 'Steps'), (':', 'Dotted')]),
('float', 1.2),
(None, 'Other:'),
('int', 12),
('font', ('Arial', 10, False, True)),
('color', '#123409'),
('bool', True),
('date', datetime.date(2010, 10, 10)),
('datetime', datetime.datetime(2010, 10, 10)),
]
def create_datagroup_example():
datalist = create_datalist_example()
return ((datalist, "Category 1", "Category 1 comment"),
(datalist, "Category 2", "Category 2 comment"),
(datalist, "Category 3", "Category 3 comment"))
#--------- datalist example
datalist = create_datalist_example()
def apply_test(data):
print("data:", data)
print("result:", fedit(datalist, title="Example",
comment="This is just an <b>example</b>.",
apply=apply_test))
#--------- datagroup example
datagroup = create_datagroup_example()
print("result:", fedit(datagroup, "Global title"))
#--------- datagroup inside a datagroup example
datalist = create_datalist_example()
datagroup = create_datagroup_example()
print("result:", fedit(((datagroup, "Title 1", "Tab 1 comment"),
(datalist, "Title 2", "Tab 2 comment"),
(datalist, "Title 3", "Tab 3 comment")),
"Global title"))
|
apache-2.0
|
PatrickOReilly/scikit-learn
|
sklearn/svm/tests/test_sparse.py
|
7
|
13354
|
from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import (assert_warns, assert_raise_message,
ignore_warnings)
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits, since iris, blobs and make_classification did not
# expose the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
# Test decision_function
# Sanity check: test that decision_function implemented in Python
# returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
|
bsd-3-clause
|
petosegan/scikit-learn
|
sklearn/ensemble/__init__.py
|
217
|
1307
|
"""
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
|
bsd-3-clause
|
ckuethe/gnuradio
|
gr-filter/examples/decimate.py
|
58
|
6061
|
#!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 10000000 # number of samples to use
self._fs = 10000 # initial sampling rate
self._decim = 20 # Decimation rate
# Generate the prototype filter taps for the decimators with a 200 Hz bandwidth
self._taps = filter.firdes.low_pass_2(1, self._fs,
200, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._decim))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._decim
print "Taps per channel: ", tpc
# Build the input signal source
# We create a list of freqs, and a sine wave is generated and added to the source
# for each one of these frequencies.
self.signals = list()
self.add = blocks.add_cc()
freqs = [10, 20, 2040]
for i in xrange(len(freqs)):
self.signals.append(analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freqs[i], 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct a PFB decimator filter
self.pfb = filter.pfb.decimator_ccf(self._decim, self._taps, 0)
# Construct a standard FIR decimating filter
self.dec = filter.fir_filter_ccf(self._decim, self._taps)
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Create the sink for the decimated signal
self.snk = blocks.vector_sink_c()
self.connect(self.pfb, self.snk)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(16,9))
fig2 = pylab.figure(2, figsize=(16,9))
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input to the decimator
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b")
p1_t = sp1_t.plot(t_in, x_in.imag, "r")
sp1_t.set_ylim([-tb._decim*1.1, tb._decim*1.1])
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot the output of the decimator
fs_o = tb._fs / tb._decim
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk.data()[Ns:Ns+Ne]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("PFB Decimated Signal", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o.real, "b-o")
p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
BigDataforYou/movie_recommendation_workshop_1
|
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/util/print_versions.py
|
1
|
4886
|
import os
import platform
import sys
import struct
import subprocess
import codecs
def get_sys_info():
"Returns system information as a dict"
blob = []
# get full commit hash
commit = None
if os.path.isdir(".git") and os.path.isdir("pandas"):
try:
pipe = subprocess.Popen('git log --format="%H" -n 1'.split(" "),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
so, serr = pipe.communicate()
except:
pass
else:
if pipe.returncode == 0:
commit = so
try:
commit = so.decode('utf-8')
except ValueError:
pass
commit = commit.strip().strip('"')
blob.append(('commit', commit))
try:
(sysname, nodename, release,
version, machine, processor) = platform.uname()
blob.extend([
("python", "%d.%d.%d.%s.%s" % sys.version_info[:]),
("python-bits", struct.calcsize("P") * 8),
("OS", "%s" % (sysname)),
("OS-release", "%s" % (release)),
# ("Version", "%s" % (version)),
("machine", "%s" % (machine)),
("processor", "%s" % (processor)),
("byteorder", "%s" % sys.byteorder),
("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")),
("LANG", "%s" % os.environ.get('LANG', "None")),
])
except:
pass
return blob
def show_versions(as_json=False):
import imp
sys_info = get_sys_info()
deps = [
# (MODULE_NAME, f(mod) -> mod version)
("pandas", lambda mod: mod.__version__),
("nose", lambda mod: mod.__version__),
("pip", lambda mod: mod.__version__),
("setuptools", lambda mod: mod.__version__),
("Cython", lambda mod: mod.__version__),
("numpy", lambda mod: mod.version.version),
("scipy", lambda mod: mod.version.version),
("statsmodels", lambda mod: mod.__version__),
("xarray", lambda mod: mod.__version__),
("IPython", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
("patsy", lambda mod: mod.__version__),
("dateutil", lambda mod: mod.__version__),
("pytz", lambda mod: mod.VERSION),
("blosc", lambda mod: mod.__version__),
("bottleneck", lambda mod: mod.__version__),
("tables", lambda mod: mod.__version__),
("numexpr", lambda mod: mod.__version__),
("matplotlib", lambda mod: mod.__version__),
("openpyxl", lambda mod: mod.__version__),
("xlrd", lambda mod: mod.__VERSION__),
("xlwt", lambda mod: mod.__VERSION__),
("xlsxwriter", lambda mod: mod.__version__),
("lxml", lambda mod: mod.etree.__version__),
("bs4", lambda mod: mod.__version__),
("html5lib", lambda mod: mod.__version__),
("httplib2", lambda mod: mod.__version__),
("apiclient", lambda mod: mod.__version__),
("sqlalchemy", lambda mod: mod.__version__),
("pymysql", lambda mod: mod.__version__),
("psycopg2", lambda mod: mod.__version__),
("jinja2", lambda mod: mod.__version__),
("boto", lambda mod: mod.__version__),
("pandas_datareader", lambda mod: mod.__version__)
]
deps_blob = list()
for (modname, ver_f) in deps:
try:
try:
mod = imp.load_module(modname, *imp.find_module(modname))
except (ImportError):
import importlib
mod = importlib.import_module(modname)
ver = ver_f(mod)
deps_blob.append((modname, ver))
except:
deps_blob.append((modname, None))
if (as_json):
try:
import json
except:
import simplejson as json
j = dict(system=dict(sys_info), dependencies=dict(deps_blob))
if as_json is True:
print(j)
else:
with codecs.open(as_json, "wb", encoding='utf8') as f:
json.dump(j, f, indent=2)
else:
print("\nINSTALLED VERSIONS")
print("------------------")
for k, stat in sys_info:
print("%s: %s" % (k, stat))
print("")
for k, stat in deps_blob:
print("%s: %s" % (k, stat))
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-j", "--json", metavar="FILE", nargs=1,
help="Save output as JSON into file, pass in "
"'-' to output to stdout")
(options, args) = parser.parse_args()
if options.json == "-":
options.json = True
show_versions(as_json=options.json)
return 0
if __name__ == "__main__":
sys.exit(main())
|
mit
|
BGodefroyFR/Deep-Audio-Visualization
|
learning/scripts/extractSpectrograms.py
|
1
|
1608
|
####
# Extracts spectrograms from raw audio file and exports pickled data
# param 1: raw audio file path
# param 2: whether to resample to the standard sample rate if necessary (boolean)
###
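# Example invocation (hypothetical file name, for illustration only):
#   python extractSpectrograms.py my_track.wav True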
from pylab import *
from scipy.io import wavfile
from scipy.signal import *
from scipy import *
import matplotlib
import sys
import os
import pickle
import params
def getMax(arr):
_max = -1.
for i in range(0, len(arr)):
for j in range(0, len(arr[0])):
if (arr[i,j] > _max):
_max = arr[i,j]
return _max
def extractSpectrogram(rawAudioPath, doResample=False) :
originalSampleFreq, audioData = wavfile.read(rawAudioPath)
# If sample rate different from the standart one, resamples.
if originalSampleFreq != params.SAMPLE_RATE:
if doResample:
print "Resampling..."
"""audioData = resample(audioData,
int(float(len(audioData)) * float(params.SAMPLE_RATE) / float(originalSampleFreq)))"""
newFilePath = rawAudioPath[:-4] + "_resamp.wav"
print("sox " + rawAudioPath + " -r " + str(params.SAMPLE_RATE) + " " + newFilePath)
os.system("sox " + rawAudioPath + " -r " + str(params.SAMPLE_RATE) + " " + newFilePath)
originalSampleFreq, audioData = wavfile.read(newFilePath)
else:
return None
audioData = audioData / (2.**15) # Map values into [-1, 1]
spectrograms = np.empty((len(audioData) / params.WINDOW_SIZE, params.WINDOW_SIZE))
print "Extracting spectrogram..."
for i in range(0, len(audioData) / params.WINDOW_SIZE):
spectrograms[i,:] = abs( np.fft.fft(audioData[i*params.WINDOW_SIZE:(i+1)*params.WINDOW_SIZE]) )
return spectrograms
if __name__ == "__main__":
extractSpectrogram(sys.argv[1], sys.argv[2])
|
gpl-3.0
|
arahuja/scikit-learn
|
examples/cluster/plot_lena_segmentation.py
|
271
|
2444
|
"""
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the partition space
closest to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi tessellation.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
|
bsd-3-clause
|
guiccbr/autonomous-fuzzy-quadcopter
|
python/py_quad_control/py_sim/sparc/test_drone.py
|
1
|
9880
|
# ------------------------ Imports ----------------------------------#
from ...controller import sparc
from ...models.py import quadcopter as quad, model
import matplotlib.pyplot as plt
import numpy as np
import math
# ------------------------ Constants -------------------------------#
# - Motor:
MOTOR_KV = 980
MOTOR_MAX_VOLTAGE = 11.1
# - Control Signal:
UPARKED = 1058.75 # Control signal sent to motors that is enough to balance
UMIN_ALT = -50 # Range Min
UMAX_ALT = +50 # Range Max
UMIN_PITCHROLL = -10
UMAX_PITCHROLL = +10
UMIN_YAW = -10
UMAX_YAW = +10
# Note that:
# UMAX_ALT + UMAX_YAW + UMAX_PITCHROLL <= 2000 (MAX ENGINE CONTROL SIGNAL)
# UMIN_ALT + UMIN_YAW + UMIN_PITCHROLL >= 1000 (MIN ENGINE CONTROL SIGNAL)
# - Input Signal (Measured by sensors on the plant)
X_SIZE = 2 # Dimension of the input (measured by sensors of the plant)
# - Plant output reference (Measured by sensors on the plant)
REFMAX_ALT = 8.0 # Range Max
REFMIN_ALT = 0.0 # Range Min
REFMIN_PITCHROLL = -45.0
REFMAX_PITCHROLL = +45.0
REFMIN_YAW = -150.0
REFMAX_YAW = +150.0
# - Time Step
STEPTIME = 0.1
MAXTIME = 2000
# - Noise Percent
NOISE = 0.0
# ------------------------ Main Program ---------------------------#
def test_sparc_model(debug):
# Instantiates figure for plotting motor angular velocities:
fig_motors = plt.figure('Motors')
axes_motors = fig_motors.add_axes([0.1, 0.1, 0.8, 0.8])
motor_points = [np.array([]), np.array([]), np.array([]), np.array([])]
# Instantiates figure for plotting alt results:
fig_alt = plt.figure('Quadcopter alt')
axes_alt = fig_alt.add_axes([0.1, 0.1, 0.8, 0.8])
ypoints_alt = []
refpoints_alt = []
# Instantiates figure for plotting pitch results:
fig_pitch = plt.figure('Quadcopter pitch')
axes_pitch = fig_pitch.add_axes([0.1, 0.1, 0.8, 0.8])
ypoints_pitch = []
refpoints_pitch = []
# Instantiates figure for plotting roll results:
fig_roll = plt.figure('Quadcopter roll')
axes_roll = fig_roll.add_axes([0.1, 0.1, 0.8, 0.8])
ypoints_roll = []
refpoints_roll = []
# Instantiates figure for plotting yaw results:
fig_yaw = plt.figure('Quadcopter yaw')
axes_yaw = fig_yaw.add_axes([0.1, 0.1, 0.8, 0.8])
ypoints_yaw = []
refpoints_yaw = []
# Instantiate Plant:
quadcopter = quad.quadcopter(model.model())
# Start prev_ values:
prev_y = [0.0, 0.0, 0.0, 0.0]
prev_ref = [0.0, 0.0, 0.0, 0.0]
prev_u = [0.0, 0.0, 0.0, 0.0]
# Reference
new_reference = True
# Run for k steps
k = 1
while k * STEPTIME < MAXTIME:
# Get sample, and generates input
quad_position = quadcopter.x
quad_angles = quadcopter.theta # angles: [pitch, roll, yaw]
# y : [alt, yaw, pitch, roll]
curr_y = [quad_position[2], quad_angles[2], quad_angles[0], quad_angles[1]]
# Set references.
curr_ref = [7.0, 0.0, 0.0, 0.0]
# If reference curve has changed, update C.
if k != 1 and new_reference:
controller_alt.update_reference_range(REFMIN_ALT, REFMAX_ALT)
controller_pitch.update_reference_range(REFMIN_PITCHROLL, REFMAX_PITCHROLL)
controller_roll.update_reference_range(REFMIN_PITCHROLL, REFMAX_PITCHROLL)
controller_yaw.update_reference_range(REFMIN_YAW, REFMAX_YAW)
new_reference = False
# Adding Noise:
curr_y = curr_y * (1 + 2 * NOISE * np.random.rand(4, 1))
curr_x = [generate_input(curr_y[0], prev_y[0], curr_ref[0], prev_ref[0]),
generate_input(curr_y[1], prev_y[1], curr_ref[1], prev_ref[1]),
generate_input(curr_y[2], prev_y[2], curr_ref[2], prev_ref[2]),
generate_input(curr_y[3], prev_y[3], curr_ref[3], prev_ref[3])]
# Stores on list for plotting:
ypoints_alt.append(curr_y[0])
refpoints_alt.append(curr_ref[0])
# Stores on list for plotting:
ypoints_pitch.append(curr_y[2])
refpoints_pitch.append(curr_ref[2])
# Stores on list for plotting:
ypoints_roll.append(curr_y[3])
refpoints_roll.append(curr_ref[3])
# Stores on list for plotting:
ypoints_yaw.append(curr_y[1])
refpoints_yaw.append(curr_ref[1])
# Print result (curr_ref - curr_y)
# print "Step:", k, " | y:", curr_y, " | err:", np.subtract(curr_y,curr_ref).tolist(), " | u:", curr_u
prev_y = curr_y[:]
prev_ref = curr_ref[:]
if (k * STEPTIME) % (MAXTIME / 10) == 0:
print 't[s]:', k * STEPTIME
print '#clouds:', 'alt =', len(controller_alt.clouds), \
'yaw =', len(controller_yaw.clouds), \
'pitch =', len(controller_pitch.clouds), \
'roll =', len(controller_roll.clouds)
# On the first iteration, initializes the controller with the first values
if k == 1:
# Initial Control signal (defined as the error relative to the reference):
e_alt = curr_x[0][0]
e_yaw = curr_x[1][0]
e_pitch = curr_x[2][0]
e_roll = curr_x[3][0]
curr_u = [e_alt, e_yaw, e_pitch, e_roll]
# Instantiates Controller and does not update model:
controller_alt = sparc.SparcController((UMIN_ALT, UMAX_ALT), (REFMIN_ALT, REFMAX_ALT), X_SIZE, curr_x[0],
curr_u[0], curr_ref[0], curr_y[0])
controller_yaw = sparc.SparcController((UMIN_YAW, UMAX_YAW), (REFMIN_YAW, REFMAX_YAW), X_SIZE, curr_x[1],
curr_u[1], curr_ref[1], curr_y[1])
controller_pitch = sparc.SparcController((UMIN_PITCHROLL, UMAX_PITCHROLL),
(REFMIN_PITCHROLL, REFMAX_PITCHROLL),
X_SIZE, curr_x[2], curr_u[2], curr_ref[2], curr_y[2])
controller_roll = sparc.SparcController((UMIN_PITCHROLL, UMAX_PITCHROLL),
(REFMIN_PITCHROLL, REFMAX_PITCHROLL),
X_SIZE, curr_x[3], curr_u[3], curr_ref[3], curr_y[3])
else:
# Gets the output of the controller for the current input x
alt_u = controller_alt.update(curr_x[0], curr_y[0], curr_ref[0], prev_u[0])
# yaw_u = controller_yaw.update(curr_x[1], curr_y[1], curr_ref[1], prev_u[1])
# pitch_u = controller_pitch.update(curr_x[2], curr_y[2], curr_ref[2], prev_u[2])
# roll_u = controller_roll.update(curr_x[3], curr_y[3], curr_ref[3], prev_u[3])
curr_u = [alt_u, 0.0, 0.0, 0.0]
# Convert control signals to speed on engines:
# Method I:
m1 = UPARKED + curr_u[0] + curr_u[1] + curr_u[2]
m2 = UPARKED + curr_u[0] - curr_u[1] + curr_u[3]
m3 = UPARKED + curr_u[0] + curr_u[1] - curr_u[2]
m4 = UPARKED + curr_u[0] - curr_u[1] - curr_u[3]
# Method 2 (from V-REP quad model):
# m1 = UPARKED + curr_u[0]*(1 + curr_u[1] + curr_u[2])
# m2 = UPARKED + curr_u[0]*(1 - curr_u[1] + curr_u[3])
# m3 = UPARKED + curr_u[0]*(1 + curr_u[1] - curr_u[2])
# m4 = UPARKED + curr_u[0]*(1 - curr_u[1] - curr_u[3])
# Stores on list for plotting:
motor_points[0] = np.append(motor_points[0], [m1])
motor_points[1] = np.append(motor_points[1], [m2])
motor_points[2] = np.append(motor_points[2], [m3])
motor_points[3] = np.append(motor_points[3], [m4])
if debug == 'T':
print '#Clouds Alt: ', len(controller_yaw.clouds)
print 'u: ', curr_u
print '(alt, yaw, pitch, roll): ', (curr_y[0], curr_y[1], curr_y[2], curr_y[3])
print 'Engines: ', (m1, m2, m3, m4)
# Updates the model
quadcopter.update(STEPTIME, (conv_control_to_motor_speed(m1),
conv_control_to_motor_speed(m2),
conv_control_to_motor_speed(m3),
conv_control_to_motor_speed(m4)))
# Increment K
k += 1
# Store prev_u
prev_u = curr_u
# Plotting
kpoints = [x * float(STEPTIME) for x in range(1, k)]
axes_motors.plot(kpoints, motor_points[0], 'r')
axes_motors.plot(kpoints, motor_points[1], 'y')
axes_motors.plot(kpoints, motor_points[2], 'b')
axes_motors.plot(kpoints, motor_points[3], 'g')
axes_alt.plot(kpoints, refpoints_alt, 'r')
axes_alt.plot(kpoints, ypoints_alt, 'b')
axes_roll.plot(kpoints, refpoints_roll, 'r')
axes_roll.plot(kpoints, ypoints_roll, 'b')
axes_pitch.plot(kpoints, refpoints_pitch, 'r')
axes_pitch.plot(kpoints, ypoints_pitch, 'b')
axes_yaw.plot(kpoints, refpoints_yaw, 'r')
axes_yaw.plot(kpoints, ypoints_yaw, 'b')
plt.show()
# ------------------------ Global Methods -------------------------#
def reference(a, k):
"""
Outputs the desired output of the plant, on the time step k.
Keyword arguments:
k -- timestep
"""
# Exponential
refk = a * (1 - math.e ** (-0.01 * k))
# refk = 5*math.cos((2*math.pi/t)*k*STEPTIME) + 5*math.sin((1.4*2*math.pi/t)*k*STEPTIME) + 20
return refk
def generate_input(y, yprev, ref, refprev):
if math.isnan(y):
curr_e = 0
else:
curr_e = ref - y
prev_e = refprev - yprev
# x = np.array([y, yprev])
# x = np.array([curr_e, (curr_e-prev_e)/t])
x = np.array([curr_e, (curr_e - prev_e)])
return x
def conv_control_to_motor_speed(m):
return ((m - 1000.) / 1000.) * (MOTOR_MAX_VOLTAGE * MOTOR_KV)
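# For reference (derived from the constants above): a control value of 1000
# maps to 0, and 2000 maps to MOTOR_MAX_VOLTAGE * MOTOR_KV = 11.1 * 980 = 10878.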
# ------------------------ Run Main Program ------------------------#
test_sparc_model(debug=0)
|
mit
|
edawine/fatools
|
fatools/lib/fautil/plot.py
|
2
|
10261
|
"""
Collection of functions to do assay plotting using matplotlib.
"""
from os.path import splitext
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from fatools.lib import params
from fatools.lib.utils import cerr, cexit
from fatools.lib.fautil.wavelen2rgb import wavelen2rgb
def align_fsa(fsa):
"""
Align fsa to prepare for size and retention time extraction from each allele.
Input
-----
fsa: class of fsa
uses Params() from fatools.lib.params as the parameters for fsa alignment
Output
------
fsa that has been aligned
"""
fsa.align(params.Params())
def determine_figure_size(list_of_data):
"""
Prepare the figure size needed by getting number of data.
Input
-----
list_of_data: list whose length determines the number of subplots
Output
------
matplotlib.figure with size to accommodate subplots
"""
# Each axes is given 2" in height
height = 2 * len(list_of_data)
figure = plt.figure()
figure.set_size_inches(20, height)
return figure
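# For example (illustrative): determine_figure_size(range(4)) returns a
# 20" x 8" figure -- 2" of height per entry, at a fixed 20" width.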
def colorize_wavelength(wavelength):
"""
Find dye color by using wavelen2rgb.
Input
-----
wavelength: int of a dye wavelength
Output
------
RGB value in 3-tuple divided by 100: (R, G, B)
The division by 100 is necessary because matplotlib color parameter
only accepts value from 0-1.
"""
return tuple([color / 100 for color in wavelen2rgb(wavelength)])
def get_size_rtime_rfu(channel):
"""
Get size, retention time, and RFU from the align method of fsa.
Input
-----
channel: a channel class from one of the channels attribute in fsa class
Output
------
size_rtime_rfu: 3-tuples of size, rtime, and RFU from alleles in channel
Alleles with a size of -1 are not included in the collection.
"""
alleles = channel.alleles
size_rtime_rfu = []
if alleles == []:
return size_rtime_rfu
for allele in alleles:
if allele.size == -1:
continue
size_rtime_rfu.append((allele.size, allele.rtime, allele.rfu))
return size_rtime_rfu
def prepare_second_x_axis(channel_axes, size_rtime_rfu):
"""
Create a second x-axis to indicate the size of alleles.
Input
-----
channel_axes: the channel axis to be marked
size_rtime_rfu: the data for marking the second x-axis
Output
------
channel_axes that have size markings (if available)
"""
sizes = []
rtimes = []
for size, rtime, _ in size_rtime_rfu:
sizes.append(int(size))
rtimes.append(rtime)
second_x_axis = channel_axes.twiny()
second_x_axis.set_xlim(channel_axes.get_xlim())
second_x_axis.set_xticks(rtimes)
second_x_axis.set_xticklabels(sizes, rotation='vertical', fontsize=8)
return second_x_axis
def save_or_show(plot_file):
"""
Determine if the plot is to be saved or shown.
Input
-----
plot_file: location and file name for saving the plot; may also be a
PdfPages object, or None to show the plot instead
Output
------
If plot_file is None, then show plot to user.
If plot_file is supplied, then the plot is saved to file.
If plot_file is PdfPages object, save to PdfPages object.
"""
plt.tight_layout()
try:
plot_file.savefig(dpi=150)
except AttributeError:
if plot_file is not None:
plt.savefig(plot_file, dpi=150)
else:
plt.show()
finally:
plt.close()
def do_plot(fsa, plot_file=None):
"""
Plot an assay in a plot.
Input
-----
fsa: class of fsa
plot_file: path for saving plot to file
Output
------
a figure class ready to be saved/shown
"""
channels = fsa.channels
for channel in channels:
color = colorize_wavelength(channel.wavelen)
plt.plot(channel.data, color=color, label=channel.dye)
plt.legend(framealpha=0.5)
plt.title(fsa.filename)
save_or_show(plot_file)
def do_split_plot(fsa, plot_file=None):
"""
Plot each assay dye in its own subplot.
Input
-----
fsa: class of fsa
plot_file: path for saving plot to file
Output
------
a figure class ready to be saved/shown
"""
align_fsa(fsa)
channels = fsa.channels
figure = determine_figure_size(channels)
fsa_subplots = []
twiny_axes = []
for channel_axes_num, channel in enumerate(channels):
color = colorize_wavelength(channel.wavelen)
channel_axes = figure.add_subplot(len(channels), 1, channel_axes_num + 1)
channel_axes.plot(channel.data, color=color, label=channel.dye)
channel_axes.legend(framealpha=0.5)
fsa_subplots.append(channel_axes)
size_rtime_rfu = get_size_rtime_rfu(channel)
if size_rtime_rfu:
max_rfu = max(p[2] for p in size_rtime_rfu) * 1.2
else:
max_rfu = max(channel.data) * 1.2
channel_axes.set_ylim((0, max_rfu))
second_x_axis = prepare_second_x_axis(channel_axes, size_rtime_rfu)
twiny_axes.append(second_x_axis)
if channel_axes_num == 0:
channel_axes.set_title(fsa.filename, y=1.3)
for axes in fsa_subplots:
axes.get_shared_x_axes().join(*fsa_subplots)
axes.get_shared_x_axes().join(*twiny_axes)
save_or_show(plot_file)
def determine_included_fsa_to_plot(score, rss, fsas):
"""
Separate based on score and RSS value.
Input
-----
score: int/float score threshold for fsa exclusion
rss: int/float RSS threshold for fsa exclusion
fsas: list of fsa files
Output
------
included_fsas: list of fsa that is lower than the score threshold
set in --score parameter and higher than the RSS
threshold set in --rss parameter
"""
included_fsas = []
for fsa in fsas:
align_fsa(fsa)
if fsa.score <= score and fsa.rss >= rss:
included_fsas.append(fsa)
# sort FSAs by score (ascending) and rss (descending)
included_fsas.sort(key=lambda fsa: (fsa.score, -fsa.rss))
# Limit to the top 100 worst fsa score & RSS
included_fsas = included_fsas[:100]
return included_fsas
def do_ladder_plot(fsas, plot_file):
"""
Create a plot of the ladder channel from fsa files.
Input
-----
args: arguments namespace from argparse
fsas: list of fsa files
plot_file: path for saving plot to file
Output
------
a figure class ready to be saved/shown
"""
figure = determine_figure_size(fsas)
for ladder_axes_num, fsa in enumerate(fsas):
ladder = fsa.get_ladder_channel()
color = colorize_wavelength(ladder.wavelen)
ladder_axes = figure.add_subplot(len(fsas), 1, ladder_axes_num + 1)
ladder_axes.plot(ladder.data, color=color, label=ladder.dye)
ladder_axes.legend(framealpha=0.5)
size_rtime_rfu = get_size_rtime_rfu(ladder)
if size_rtime_rfu:
max_rfu = max(p[2] for p in size_rtime_rfu) * 1.2
else:
max_rfu = max(ladder.data) * 1.2
ladder_axes.set_ylim((0, max_rfu))
prepare_second_x_axis(ladder_axes, size_rtime_rfu)
title = '{filename} | Score: {score:.2f} | RSS: {rss:.2f}'.format(
filename=fsa.filename,
score=fsa.score,
rss=fsa.rss)
ladder_axes.set_title(title, y=1.3)
save_or_show(plot_file)
def file_handler(fsa_list):
"""
Generate fsa file from list of fsa to plot.
Input
-----
fsa_list: list containing tuples of fsa and index
Output
------
A generator object returning fsa file
"""
for fsa, index in fsa_list:
yield fsa
def check_and_prepare_pdf(plot_file):
"""
Check if format is supported by matplotlib, then determine if
PdfPages object needs to be prepared for plotting to pdf.
Input
-----
plot_file: string of plot file name and format
Output
------
plot_file: PdfPages object with plot_file name if format is '.pdf'
"""
if plot_file is not None:
plot_file_ext = splitext(plot_file)[-1]
if plot_file_ext == '.pdf':
plot_file = PdfPages(plot_file)
else:
try:
plt.savefig(plot_file)
except ValueError:
cerr('E: Format {} is not supported!'.format(plot_file_ext))
cexit('Exiting...')
return plot_file
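# For example (illustrative): check_and_prepare_pdf("report.pdf") returns a
# PdfPages object, while check_and_prepare_pdf("report.png") returns the original
# string after verifying that matplotlib supports the format.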
def command_block(args, fsas, plot_file):
"""
Prepare the necessary data and hold the commands to be done.
Give warnings if there are overlapping arguments.
Input
-----
args: arguments namespace from argparse
fsas: list of fsa files
plot_file: PdfPages object, string, or None
"""
if args.ladder_plot:
if args.plot or args.split_plot:
cerr('W: --plot, --split-plot and --ladder-plot are flagged')
cerr('W: Only the --ladder-plot option will be done')
included_fsas = determine_included_fsa_to_plot(args.score, args.rss, fsas)
do_ladder_plot(included_fsas, plot_file)
return
elif args.plot or args.split_plot:
if args.plot and args.split_plot and args.plot_file:
cerr('W: --plot, --split-plot, and --plot-file are flagged')
cerr('W: This will only save the --split-plot results if format is not in pdf')
for fsa in fsas:
if args.plot:
do_plot(fsa, plot_file)
if args.split_plot:
do_split_plot(fsa, plot_file)
def plot(args, fsa_list, dbh=None):
"""
The main function to handle all plot arguments given.
Input
-----
args: arguments namespace from argparse
fsa_list: list containing tuples of fsa and index
dbh: *reserved for database handling*
Output
------
Determine if a PdfPages object needs to be created, then
passing the args, fsas, and plot file to the commands.
"""
plot_file = check_and_prepare_pdf(args.plot_file)
try:
with plot_file as pdf:
command_block(args, file_handler(fsa_list), pdf)
except AttributeError:
command_block(args, file_handler(fsa_list), plot_file)
|
lgpl-3.0
|
dreadjesus/MachineLearning
|
KNN/exp_KNN.py
|
1
|
3254
|
'''
Used to classify data; can handle multiple target classes
Nearest Neighbours: Pros and Cons
Pros:
Simple to implement
Flexible to feature / distance choices
Naturally handles multi-class cases
Can do well in practice with enough representative data
Cons:
Large search problem to find nearest neighbours
Storage of data
Must know we have a meaningful distance function
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
# load data
df = pd.read_csv(
'D:/Github_python_ML/MachineLearning/KNN/KNN_Project_Data')
# Scale the data so that all features share the same range
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(df.drop('TARGET CLASS', axis=1))
scaled_features = scaler.transform(df.drop('TARGET CLASS', axis=1))
df_feat = pd.DataFrame(data=scaled_features, columns=df.columns[:-1])
# split data to training and test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
df_feat, df['TARGET CLASS'], test_size=0.3)
# KNN object; use n_neighbors=1 to test how well it performs
from sklearn.neighbors import KNeighborsClassifier
KNN = KNeighborsClassifier(n_neighbors=1)
# train the KNN and predict
KNN.fit(X_train, y_train)
pred = KNN.predict(X_test)
# Print reports
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred))
# Test the model and see if we can get a better prediction by changing
# the n_neighbors value
error_rate = []
for index in range(1, 40):
myKNN = KNeighborsClassifier(n_neighbors=index)
myKNN.fit(X_train, y_train)
mypred = myKNN.predict(X_test)
error_rate.append(np.mean(mypred != y_test))
# Plot the error rate
plt.figure(figsize=(10, 6))
plt.plot(range(1, 40), error_rate, color='blue', marker='o',
markerfacecolor='green', markersize=8)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
plt.show()
# Make a new model and use the calculated best value.
# Note to future me: good practice is to hold out a separate validation split
# (or cross-validate) so we don't tune the K value against the test data.
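# A sketch of that approach (assuming the same X_train/y_train as above):
# pick K by cross-validation on the training data only, e.g.
#   from sklearn.model_selection import cross_val_score
#   cv_scores = [cross_val_score(KNeighborsClassifier(n_neighbors=k),
#                                X_train, y_train, cv=5).mean()
#                for k in range(1, 40)]
#   best_k_cv = int(np.argmax(cv_scores)) + 1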
best_k = error_rate.index(min(error_rate)) + 1  # +1 because the loop above starts at K=1
print('NEW n_neighbors: ', best_k)
newKNN = KNeighborsClassifier(n_neighbors=best_k)
newKNN.fit(X_train, y_train)
newpred = newKNN.predict(X_test)
print(confusion_matrix(y_test, newpred))
print(classification_report(y_test, newpred))
# Testing a GridSearchCV approach as well.
from sklearn.model_selection import GridSearchCV
param_grid={"n_neighbors":[1 , 5, 10, 15, 20, 30, 40]}
grid = GridSearchCV(KNeighborsClassifier(), param_grid)
grid.fit(X_train, y_train)
grid_pred = grid.predict(X_test)
print('\n --OLD VALUES!--')
print(classification_report(y_test, pred))
print(confusion_matrix(y_test, pred))
print('\n --NEW VALUES!--')
print(classification_report(y_test, newpred))
print(confusion_matrix(y_test, newpred))
print('\n --GRID VALUES!--')
print(classification_report(y_test, grid_pred))
print(confusion_matrix(y_test, grid_pred))
|
mit
|
arokem/pyAFQ
|
examples/plot_callosal_tract_profile.py
|
2
|
17674
|
"""
==============================
Plotting Novel Tract Profiles:
==============================
The following is an example of tractometry for a novel bundle and plotting the
resulting FA tract profile. We will run tractometry for the *anterior forceps*
using waypoint ROIs.
**AFQ Waypoint ROI Tractometry:**
.. note::
This example uses the Yeatman et al. waypoint ROI approach, first
described in [Yeatman2012]_ and further elaborated in [Yeatman2014]_.
The waypoint ROIs used in this example are from the anterior frontal lobe of
the corpus callosum (AntFrontal). The waypoint ROIs are from the human corpus
callosum templates:
https://figshare.com/articles/Templates_for_Automated_Fiber_Quantification_of_corpus_callosum_from_Diffusion_MRI_data/3381523
"""
import os.path as op
import matplotlib.pyplot as plt
import plotly
import numpy as np
import nibabel as nib
import dipy.data as dpd
from dipy.data import fetcher
from dipy.io.streamline import save_tractogram, load_tractogram
from dipy.stats.analysis import afq_profile, gaussian_weights
from dipy.io.stateful_tractogram import StatefulTractogram
from dipy.io.stateful_tractogram import Space
from dipy.align import affine_registration
import AFQ.data as afd
import AFQ.tractography as aft
import AFQ.registration as reg
import AFQ.models.dti as dti
import AFQ.segmentation as seg
from AFQ.utils.volume import transform_inverse_roi, density_map
from AFQ.viz.utils import show_anatomical_slices
from AFQ.viz.plotly_backend import visualize_bundles, visualize_volume
import logging
import sys
# Ensure segmentation logging information is included in this example's output
root = logging.getLogger()
root.setLevel(logging.ERROR)
logging.getLogger('AFQ.Segmentation').setLevel(logging.INFO)
logging.getLogger('AFQ.tractography').setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
root.addHandler(handler)
# Target directory for this example's output files
working_dir = "./callosal_tract_profile"
##########################################################################
# Get example data:
# -----------------
# **Diffusion dataset**
#
# .. note::
# The diffusion data used in this example are from the Stanford High Angular
# Resolution Diffusion Imaging (HARDI) dataset:
#
# https://purl.stanford.edu/ng782rw8378
print("Fetching data...")
# If it does not already exist, `fetch_stanford_hardi` will download the first
# subject's session from the HARDI data into fetcher.dipy_home:
# `~/.dipy/stanford_hardi`
dpd.fetch_stanford_hardi()
# Reference to data file locations
hardi_dir = op.join(fetcher.dipy_home, "stanford_hardi")
hardi_fdata = op.join(hardi_dir, "HARDI150.nii.gz")
hardi_fbval = op.join(hardi_dir, "HARDI150.bval")
hardi_fbvec = op.join(hardi_dir, "HARDI150.bvec")
print(f'Loading data file: {hardi_fdata}')
img = nib.load(hardi_fdata)
# Display output from this step
img_data = img.get_fdata()
# Working with space and time data: take the middle volume of the 4D series
# for display
img_data = img_data[..., int(img_data.shape[3] / 2)]
show_anatomical_slices(img_data, 'HARDI 150 DWI')
print(f'bvals: {np.loadtxt(hardi_fbval)}')
print(f'bvec: {np.loadtxt(hardi_fbvec)}')
##########################################################################
# Calculate DTI:
# --------------
# Fit the DTI model using default settings, save files with derived maps.
#
# By default following DTI measurements are calculated:
#
# - Fractional anisotropy (FA),
#
# - Mean diffusivity (MD),
#
# - Axial diffusivity (AD),
#
# - and Radial diffusivity (RD)
#
# In this example we will only use FA.
#
# .. note::
# By default:
#
# - All b-values less than or equal to 50 are considered to be
# without diffusion weighting.
#
# - No binary masks are applied; therefore all voxels are processed.
#
# .. note::
# The diffusion tensor imaging parameters contain the associated eigenvalues
# and eigenvectors from eigen decomposition on the diffusion tensor.
print("Calculating DTI...")
if not op.exists(op.join(working_dir, 'dti_FA.nii.gz')):
dti_params = dti.fit_dti(hardi_fdata, hardi_fbval, hardi_fbvec,
out_dir=working_dir)
else:
dti_params = {'FA': op.join(working_dir, 'dti_FA.nii.gz'),
'params': op.join(working_dir, 'dti_params.nii.gz')}
print(f"Loading {dti_params['FA']}")
FA_img = nib.load(dti_params['FA'])
FA_data = FA_img.get_fdata()
show_anatomical_slices(FA_data, 'Fractional Anisotropy (FA)')
##########################################################################
# Register the individual data to a template:
# -------------------------------------------
# For the purpose of bundle segmentation, the individual brain is registered
# to the MNI T2 template. The waypoint ROIs used in segmentation are then each
# brought into each subject's native space to test streamlines for whether they
# fulfill the segmentation criteria.
#
# .. note::
# To find the right place for the waypoint ROIs, we calculate a non-linear
# transformation between the individual's brain and the MNI T2 template.
# Before calculating this non-linear warping, we perform a pre-alignment
# using an affine transformation.
print("Registering to template...")
MNI_T2_img = afd.read_mni_template()
if not op.exists(op.join(working_dir, 'mapping.nii.gz')):
import dipy.core.gradients as dpg
gtab = dpg.gradient_table(hardi_fbval, hardi_fbvec)
b0 = np.mean(img.get_fdata()[..., gtab.b0s_mask], -1)
# Prealign using affine registration
_, prealign = affine_registration(
b0,
MNI_T2_img.get_fdata(),
img.affine,
MNI_T2_img.affine)
# Then register using a non-linear registration using the affine for
# prealignment
warped_hardi, mapping = reg.syn_register_dwi(hardi_fdata, gtab,
prealign=prealign)
reg.write_mapping(mapping, op.join(working_dir, 'mapping.nii.gz'))
else:
mapping = reg.read_mapping(op.join(working_dir, 'mapping.nii.gz'),
img, MNI_T2_img)
mapping_img = nib.load(op.join(working_dir, 'mapping.nii.gz'))
mapping_img_data = mapping_img.get_fdata()
# Working with diffeomorphic map data
mapping_img_data = mapping_img_data[..., 0, 0]
show_anatomical_slices(mapping_img_data, 'Registration Displacement Mapping')
# Plot the MNI T2 template warped into the subject's native space
warped_MNI_T2_data = mapping.transform_inverse(MNI_T2_img.get_fdata())
warped_MNI_T2_img = nib.Nifti1Image(warped_MNI_T2_data.astype(float),
img.affine)
nib.save(warped_MNI_T2_img, op.join(working_dir, 'warped_MNI_T2.nii.gz'))
show_anatomical_slices(warped_MNI_T2_img.get_fdata(), 'Warped MNI T2')
##########################################################################
# Create novel bundle specification:
# ----------------------------------
# The bundles specify meta-data for the segmentation.
#
# The following keys are required for a `bundle` entry:
#
# - `ROIs`
#
# lists the ROI templates.
#
# .. note::
# Order of the `ROIs` matters and may result in different tract profiles.
#   Given a sequence of waypoint ROIs, the endpoints should appear first. Which
#   endpoint appears first should be consistent with the directionality of
#   other bundle definitions. Any intermediate waypoint ROIs should respect
#   this ordering.
#
# - `rules`
#
#   labels each ROI as inclusionary (`True`) or exclusionary (`False`).
#
# - `cross_midline`
#
# whether or not the streamline crosses the midline
#
# .. note::
#
#   It is also possible to use probability maps to further refine the
#   segmentation. If the `prob_map` key is not specified, the probabilities
#   will all be ones with the same shape as the ROI (an illustrative sketch
#   follows the bundle definition below).
#
# .. note::
#
# If using multiple bundles it is recommended to add an optional identifier
# key such as `uid`. This key is not used by segmentation, but can be helpful
# for debugging or quality control reports.
#
# To identify candidate streamlines for the anterior forceps we will use three
# waypoint ROI templates:
#
# 1. Left anterior frontal,
#
# 2. Right anterior frontal,
#
# 3. and midsagittal.
#
# The templates are first resampled into the MNI space, before
# they are brought into the subject's individual native space.
print("Fetching callosum ROI templates...")
callosal_templates = afd.read_callosum_templates(resample_to=MNI_T2_img)
show_anatomical_slices(callosal_templates["L_AntFrontal"].get_fdata(),
'Left anterior frontal ROI')
show_anatomical_slices(callosal_templates["R_AntFrontal"].get_fdata(),
'Right anterior frontal ROI')
show_anatomical_slices(callosal_templates["Callosum_midsag"].get_fdata(),
'Midsagittal ROI')
print("Creating callosal bundle specification...")
# bundle dict
bundles = {}
# anterior frontal ROIs
bundles["AntFrontal"] = {
'ROIs': [callosal_templates["L_AntFrontal"],
callosal_templates["R_AntFrontal"],
callosal_templates["Callosum_midsag"]],
'rules': [True, True, True],
'cross_midline': True
}
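##########################################################################
# A minimal illustrative sketch of the optional keys mentioned above (kept as
# comments rather than executed code; the exact accepted type for `prob_map`
# should be checked against the installed pyAFQ version)::
#
#     bundles["AntFrontal"]["uid"] = 1   # ignored by segmentation; handy for QC
#     bundles["AntFrontal"]["prob_map"] = np.ones(
#         callosal_templates["L_AntFrontal"].shape)  # ones reproduce the default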
##########################################################################
# Tracking:
# ---------
# Streamlines are generated using DTI and a deterministic tractography
# algorithm. For speed, we seed only within the waypoint ROIs for each bundle.
#
# .. note::
# By default tractography:
#
# - Will identify streamlines with lengths between 10 mm and 1 m, with
# turning angles of less than 30 degrees.
#
# - Is seeded with a single seed in each voxel on each dimension
#
# - Each step is 0.5 mm
#
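# .. note::
#   A hedged sketch of how these defaults could be overridden. The keyword
#   names below are assumptions for illustration only and should be checked
#   against the installed pyAFQ version of `aft.track`; the variables are the
#   ones defined in the code further down::
#
#       tractogram = aft.track(dti_params['params'],
#                              seed_mask=seed_roi,
#                              stop_mask=FA_data, stop_threshold=0.1,
#                              max_angle=30., step_size=0.5,
#                              min_length=10, max_length=1000)
#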
# .. note::
# In this example tractography results in a large number of candidate
# streamlines for the anterior forceps, but not many streamlines anywhere
# else.
print("Tracking...")
if not op.exists(op.join(working_dir, 'dti_streamlines.trk')):
seed_roi = np.zeros(img.shape[:-1])
for bundle in bundles:
for idx, roi in enumerate(bundles[bundle]['ROIs']):
if bundles[bundle]['rules'][idx]:
warped_roi = transform_inverse_roi(
roi,
mapping,
bundle_name=bundle)
nib.save(nib.Nifti1Image(warped_roi.astype(float), img.affine),
op.join(working_dir, f"{bundle}_{idx+1}.nii.gz"))
warped_roi_img = nib.load(op.join(working_dir,
f"{bundle}_{idx+1}.nii.gz"))
show_anatomical_slices(warped_roi_img.get_fdata(),
f'warped {bundle}_{idx+1} ROI')
# Add voxels that aren't there yet:
seed_roi = np.logical_or(seed_roi, warped_roi)
seed_roi_img = nib.Nifti1Image(seed_roi.astype(float), img.affine)
nib.save(seed_roi_img, op.join(working_dir, 'seed_roi.nii.gz'))
show_anatomical_slices(seed_roi_img.get_fdata(), 'Seed ROI')
tractogram = aft.track(dti_params['params'], seed_mask=seed_roi,
stop_mask=FA_data, stop_threshold=0.1)
save_tractogram(tractogram, op.join(working_dir, 'dti_streamlines.trk'),
bbox_valid_check=False)
tractogram_img = density_map(tractogram, n_sls=1000, to_vox=True)
nib.save(tractogram_img, op.join(working_dir,
'afq_dti_density_map.nii.gz'))
else:
tractogram = load_tractogram(op.join(working_dir, 'dti_streamlines.trk'),
img)
tractogram.to_vox()
##########################################################################
# Segmentation:
# -------------
# In this stage, streamlines are tested for several criteria: whether the
# probability that they belong to a bundle is larger than a threshold (set to
# 0 by default), whether they pass through inclusion ROIs and whether they do
# not pass through exclusion ROIs.
#
# .. note::
# By default segmentation:
#
#  - uses the Streamline Linear Registration (SLR) algorithm
#
# - does not clip streamlines to be between ROIs
#
# - All b-values less than or equal to 50 are considered to be
# without diffusion weighting.
#
# Segmentation will result in a `fiber_group` for each bundle, which
# contains the following keys:
#
# - `sl`
#
# StatefulTractogram encompassing the selected streamlines
#
# - `idx`
#
# indexes to selected streamlines
#
# .. note::
# Currently it is not possible to define endpoint filters for novel bundles,
# but this is something we expect to address. However we can run
# segmentation by ignoring endpoint filters. This means that additional
# streamlines may be included that would otherwise be excluded.
tractogram_img = nib.load(op.join(working_dir, 'afq_dti_density_map.nii.gz'))
show_anatomical_slices(tractogram_img.get_fdata(), 'DTI Density Map')
print("Segmenting fiber groups...")
segmentation = seg.Segmentation(return_idx=True,
filter_by_endpoints=False)
segmentation.segment(bundles,
tractogram,
fdata=hardi_fdata,
fbval=hardi_fbval,
fbvec=hardi_fbvec,
mapping=mapping,
reg_template=MNI_T2_img)
fiber_groups = segmentation.fiber_groups
for bundle in bundles:
tractogram = StatefulTractogram(fiber_groups[bundle]['sl'].streamlines,
img,
Space.VOX)
tractogram.to_rasmm()
save_tractogram(tractogram, op.join(working_dir, f'afq_{bundle}_seg.trk'),
bbox_valid_check=False)
tractogram_img = density_map(tractogram, n_sls=1000, to_vox=True)
nib.save(tractogram_img, op.join(working_dir,
f'afq_{bundle}_seg_density_map.nii.gz'))
show_anatomical_slices(tractogram_img.get_fdata(),
f'Segmented {bundle} Density Map')
##########################################################################
# Cleaning:
# ---------
# Each fiber group is cleaned to exclude streamlines that are outliers in terms
# of their trajectory and/or length.
#
# .. note::
#    By default cleaning:
#
# - resamples streamlines to 100 points
#
#  - given that there are more than 20 streamlines, cleaning will make at
#    most 5 attempts to prune streamlines that are:
#
# - greater than 5 standard deviations from the mean Mahalanobis distance,
# or
#
#    - greater than 4 standard deviations from the mean length
#
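# A hedged sketch of how these defaults could be made explicit. The keyword
# names are assumptions for illustration only and should be checked against
# the installed pyAFQ version of `seg.clean_bundle`::
#
#     new_fibers, idx_in_bundle = seg.clean_bundle(
#         fiber_groups[bundle]['sl'],
#         n_points=100,           # resample each streamline to 100 points
#         clean_rounds=5,         # at most 5 pruning attempts
#         distance_threshold=5,   # Mahalanobis-distance cutoff (std devs)
#         length_threshold=4,     # length cutoff (std devs)
#         return_idx=True)
#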
print("Cleaning fiber groups...")
for bundle in bundles:
print(f"Cleaning {bundle}...")
print(f"Before cleaning: {len(fiber_groups[bundle]['sl'])} streamlines")
new_fibers, idx_in_bundle = seg.clean_bundle(
fiber_groups[bundle]['sl'],
return_idx=True)
print(f"After cleaning: {len(new_fibers)} streamlines")
idx_in_global = fiber_groups[bundle]['idx'][idx_in_bundle]
np.save(op.join(working_dir, f'{bundle}_idx.npy'), idx_in_global)
tractogram = StatefulTractogram(new_fibers.streamlines,
img,
Space.VOX)
tractogram.to_rasmm()
save_tractogram(tractogram, op.join(working_dir, f'afq_{bundle}.trk'),
bbox_valid_check=False)
tractogram_img = density_map(tractogram, n_sls=1000, to_vox=True)
nib.save(tractogram_img, op.join(working_dir,
f'afq_{bundle}_density_map.nii.gz'))
show_anatomical_slices(tractogram_img.get_fdata(),
f'Cleaned {bundle} Density Map')
##########################################################################
# Visualizing bundles and tract profiles:
# ---------------------------------------
plotly.io.show(visualize_bundles(tractogram,
figure=visualize_volume(warped_MNI_T2_data),
color_by_volume=FA_data))
##########################################################################
# Bundle profiles:
# ----------------
# Streamlines are represented in the original diffusion space (`Space.VOX`) and
# scalar properties along the length of each bundle are queried from this
# scalar data. Here, the contribution of each streamline is weighted according
# to how representative this streamline is of the bundle overall.
#
# .. note::
#   As a sanity check, the tract profile of the anterior forceps should be
#   relatively symmetric (see the note below on how the weights are computed).
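#
# .. note::
#   Conceptually (a simplified sketch, not the exact implementation),
#   `gaussian_weights` assigns each streamline a weight at every node that
#   decreases with its Mahalanobis distance d_i from the bundle core, roughly
#   w_i = exp(-d_i**2 / 2) / sum_j exp(-d_j**2 / 2), so that the weights
#   across streamlines sum to one at each node.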
print("Extracting tract profiles...")
for bundle in bundles:
print(f"Extracting {bundle}...")
tractogram = load_tractogram(op.join(working_dir, f'afq_{bundle}.trk'),
img, to_space=Space.VOX)
fig, ax = plt.subplots(1)
weights = gaussian_weights(tractogram.streamlines)
profile = afq_profile(FA_data, tractogram.streamlines,
np.eye(4), weights=weights)
ax.plot(profile)
ax.set_title(bundle)
    plt.savefig(op.join(working_dir, 'AntFrontal_tractprofile.png'))
    plt.show()
##########################################################################
# References:
# -------------------------
# .. [Yeatman2012] Jason D Yeatman, Robert F Dougherty, Nathaniel J Myall,
# Brian A Wandell, Heidi M Feldman, "Tract profiles of
# white matter properties: automating fiber-tract
# quantification", PloS One, 7: e49790
#
# .. [Yeatman2014] Jason D Yeatman, Brian A Wandell, Aviv Mezer Feldman,
# "Lifespan maturation and degeneration of human brain white
# matter", Nature Communications 5: 4932
|
bsd-2-clause
|
ioos/system-test
|
Theme_1_Baseline/Scenario_1C_WebService_Strings/Scenario_1C_WebService_Strings.py
|
2
|
6407
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # IOOS System Test - Theme 1 - Scenario C - [Description](https://github.com/ioos/system-test/wiki/Development-of-Test-Themes#theme-1-baseline-assessment)
#
# ## WebService Strings
# This notebook looks at a series of typical WebServices. It searches for a list of WebService names (strings) in a list of CSW catalogs and simply lists how many services are available from each CSW.
#
# ## Guiding Questions
# Based on a series of WebService Strings, can we access web services via a series of CSW endpoints and quantify those results? Based on those results, is it apparent that some web services are not being discovered as they are utilizing variations on WebService Strings?
# <codecell>
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import csv
import re
import cStringIO
import urllib2
import parser
import pdb
import random
import datetime as dt
from datetime import datetime
from pylab import *
from owslib.csw import CatalogueServiceWeb
from owslib.wms import WebMapService
from owslib.sos import SensorObservationService
from owslib.etree import etree
from owslib import fes
import netCDF4
# from: https://github.com/ioos/catalog/blob/master/ioos_catalog/tasks/reindex_services.py#L43-L58
from datetime import datetime
from urlparse import urlparse
import requests
import xml.etree.ElementTree as ET
from owslib import fes, csw
from owslib.util import nspath_eval
from owslib.namespaces import Namespaces
#import ioos_catalog
#from ioos_catalog import app,db
# <markdowncell>
# ####Search for web services
# This is a collection of lists that we will need to examine Catalogs
# <codecell>
web_service_strings = ['urn:x-esri:specification:ServiceType:OPeNDAP',
'urn:x-esri:specification:ServiceType:odp:url',
'urn:x-esri:specification:ServiceType:WMS',
'urn:x-esri:specification:ServiceType:wms:url',
'urn:x-esri:specification:ServiceType:sos:url',
'urn:x-esri:specification:ServiceType:wcs:url']
services = {'SOS' : 'urn:x-esri:specification:ServiceType:sos:url',
'WMS' : 'urn:x-esri:specification:ServiceType:wms:url',
'WCS' : 'urn:x-esri:specification:ServiceType:wcs:url',
'DAP' : 'urn:x-esri:specification:ServiceType:odp:url' }
# This looks like a good notebook to work from
# https://www.wakari.io/sharing/bundle/rsignell/Model_search
# <markdowncell>
# The next cell lists catalog endpoints. As CSWs are discovered within the larger IOOS umbrella, this list is updated by the IOOS Program Office here: https://github.com/ioos/system-test/wiki/Service-Registries-and-Data-Catalogs
# <codecell>
#endpoint = 'http://data.nodc.noaa.gov/geoportal/csw' # NODC Geoportal: collection level
#endpoint = 'http://geodiscover.cgdi.ca/wes/serviceManagerCSW/csw' # NRCAN
#endpoint = 'http://geoport.whoi.edu/gi-cat/services/cswiso' # USGS Woods Hole GI_CAT
#endpoint = 'http://cida.usgs.gov/gdp/geonetwork/srv/en/csw' # USGS CIDA Geonetwork
#endpoint = 'http://www.nodc.noaa.gov/geoportal/csw' # NODC Geoportal: granule level
#endpoint = 'http://cmgds.marine.usgs.gov/geonetwork/srv/en/csw' # USGS Coastal & Marine Program Geonetwork
#endpoint = 'http://www.ngdc.noaa.gov/geoportal/csw' # NGDC Geoportal
#endpoint = 'http://www.ncdc.noaa.gov/cdo-web/api/v2/' #NCDC CDO Web Services
#endpoint = 'http://geo.gov.ckan.org/csw' #CKAN Testing Site for new Data.gov
#endpoint = 'https://edg.epa.gov/metadata/csw' #EPA
#endpoint = 'http://geoport.whoi.edu/geoportal/csw' #WHOI Geoportal
#endpoint = 'http://cwic.csiss.gmu.edu/cwicv1/discovery' #CWIC
#endpoint = 'http://portal.westcoastoceans.org/connect/' #West Coast Governors Alliance (Based on ESRI Geoportal back end
#print out version
#endpoint = 'http://gcmdsrv.gsfc.nasa.gov/csw' #NASA's Global Change Master Directory (GCMD) CSW Service (Requires Authorization)
#endpoint = 'http://gcmdsrv3.gsfc.nasa.gov/csw' #NASA's Global Change Master Directory (GCMD) CSW Service (Requires Authorization)
#endpoint = 'https://data.noaa.gov/csw' #data.noaa.gov csw
endpoints = ['http://www.nodc.noaa.gov/geoportal/csw',
'http://www.ngdc.noaa.gov/geoportal/csw',
'http://catalog.data.gov/csw-all',
'http://cwic.csiss.gmu.edu/cwicv1/discovery',
'http://geoport.whoi.edu/geoportal/csw',
'https://edg.epa.gov/metadata/csw',
'http://cmgds.marine.usgs.gov/geonetwork/srv/en/csw',
'http://cida.usgs.gov/gdp/geonetwork/srv/en/csw',
'http://geodiscover.cgdi.ca/wes/serviceManagerCSW/csw',
'http://geoport.whoi.edu/gi-cat/services/cswiso']
# <markdowncell>
# Test each CSW and look for the specific endpoints. Print the results.
# <codecell>
def service_urls(records,service_string='urn:x-esri:specification:ServiceType:wms:url'):
urls=[]
for key,rec in records.iteritems():
#create a generator object, and iterate through it until the match is found
#if not found, gets the default value (here "none")
url = next((d['url'] for d in rec.references if d['scheme'] == service_string), None)
if url is not None:
urls.append(url)
return urls
records1 = []
titles1 = []
lenrecords1 = []
lentitles1 = []
list1 = []
list2 = []
list3 = []
dict1 = {}
dict2 = {}
dict3 = {}
list4 = []
lenurls = []
for endpoint in endpoints:
try:
csw = CatalogueServiceWeb(endpoint,timeout=100)
csw.getrecords2(maxrecords = 100)
for web_service_string in web_service_strings:
urls1 = service_urls(csw.records,service_string=web_service_string)
list3.append(urls1)
list1.append(web_service_string)
list2.append(endpoint)
list4.append(len(urls1))
dict2['Service_URL']= list1
dict2['endpoint'] = list2
dict2['urls'] = list3
dict2['number_urls'] = list4
    except Exception, ex1:
        print 'Error querying %s: %s' % (endpoint, ex1)
#dict2['lenrecords'] = lenrecords1
#print dict2
print pd.DataFrame(dict2)
#print pd.DataFrame(dict2.keys())
# <codecell>
|
unlicense
|
mbernico/skaro
|
example/keras_mnist.py
|
1
|
1654
|
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
from keras.regularizers import l2
from keras.optimizers import SGD
from sklearn.metrics import classification_report
from keras.datasets import mnist
from keras.utils import np_utils
# hyperparameters
DROP_OUT = 0.2
BETA = 0.0001 #l2
CLASSES = 10
#load mnist
print("Loading MNIST, This may take a second....")
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print("Data Shape is " + str(X_train.shape))
print("Target Shape is " + str(y_train.shape))
# flatten and normalize the data tensors
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, CLASSES)
Y_test = np_utils.to_categorical(y_test, CLASSES)
model = Sequential()
model.add(Dense(input_dim=784, output_dim=1024, activation='relu', W_regularizer=l2(BETA)))
model.add(Dense(input_dim=1024, output_dim=1024, activation='relu', W_regularizer=l2(BETA)))
model.add(Dense(input_dim=1024, output_dim=1024, activation='relu', W_regularizer=l2(BETA)))
model.add(Dropout(DROP_OUT))
model.add(Dense(input_dim=1024, output_dim=10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)  # use the SGD optimizer configured above
model.fit(X_train, Y_train, nb_epoch=1000, verbose=0, batch_size=256)
print( np.argmax(model.predict(X_test), axis=1))
print(classification_report(y_test, np.argmax(model.predict(X_test), axis=1)))
|
gpl-3.0
|
lothian/psi4
|
psi4/driver/qcdb/util/mpl.py
|
7
|
2303
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import numpy as np
def plot_coord(ref, cand=None, orig=None, comment=None):
"""Display target geometry `ref` as black dots in 3D plot. If present, also
plot candidate geometry `cand` as red dots and starting geometry `orig` as
pale blue dots. Plot has text `comment`. For assessing alignment, red and
black should overlap and pale blue shows where red started.
"""
try:
from matplotlib import pyplot
except ImportError:
raise ImportError("""Python module matplotlib not found. Solve by installing it: `conda install matplotlib` or https://matplotlib.org/faq/installing_faq.html""")
from mpl_toolkits.mplot3d import Axes3D
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
bound = max(np.amax(ref), -1 * np.amin(ref))
ax.scatter(ref[:, 0], ref[:, 1], ref[:, 2], c='k', label='goal')
if cand is not None:
ax.scatter(cand[:, 0], cand[:, 1], cand[:, 2], c='r', label='post-align')
if orig is not None:
ax.scatter(orig[:, 0], orig[:, 1], orig[:, 2], c='lightsteelblue', label='pre-align')
if comment is not None:
ax.text2D(0.05, 0.95, comment, transform=ax.transAxes)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim(-bound, bound)
ax.set_ylim(-bound, bound)
ax.set_zlim(-bound, bound)
ax.legend()
pyplot.show()
|
lgpl-3.0
|
sgenoud/scikit-learn
|
sklearn/preprocessing.py
|
1
|
28614
|
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD
from collections import Sequence
import numpy as np
import scipy.sparse as sp
from .utils import check_arrays
from .utils import warn_if_not_float
from .base import BaseEstimator, TransformerMixin
from .utils.sparsefuncs import inplace_csr_row_normalize_l1
from .utils.sparsefuncs import inplace_csr_row_normalize_l2
from .utils.sparsefuncs import inplace_csr_column_scale
from .utils.sparsefuncs import mean_variance_axis0
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std dev for centering, scaling
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
if isinstance(std_, np.ndarray):
std_[std_ == 0.0] = 1.0
elif std_ == 0.:
std_ = 1.
else:
std_ = None
return mean_, std_
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.Scaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if sp.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
warn_if_not_float(X, estimator='The scale function')
if not sp.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis0(X)
var[var == 0.0] = 1.0
inplace_csr_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
warn_if_not_float(X, estimator='The scale function')
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
if with_std:
Xr /= std_
return X
class Scaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
    Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Attributes
----------
`mean_` : array of floats with shape [n_features]
The mean value for each feature in the training set.
`std_` : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
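    Examples
    --------
    A minimal usage sketch (shown without expected output):
    >>> import numpy as np
    >>> from sklearn.preprocessing import Scaler
    >>> X = np.array([[0., 0.], [2., 2.]])
    >>> scaler = Scaler().fit(X)        # learns mean_ == [1., 1.], std_ == [1., 1.]
    >>> X_scaled = scaler.transform(X)  # each column now has zero mean and unit variance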
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
if sp.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
warn_if_not_float(X, estimator=self)
copy = self.copy
if not sp.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
self.mean_ = None
_, var = mean_variance_axis0(X)
self.std_ = np.sqrt(var)
self.std_[var == 0.0] = 1.0
return self
else:
X = np.asarray(X)
warn_if_not_float(X, estimator=self)
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
copy = copy if copy is not None else self.copy
if sp.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
warn_if_not_float(X, estimator=self)
if not sp.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
inplace_csr_column_scale(X, 1 / self.std_)
else:
X = np.asarray(X)
warn_if_not_float(X, estimator=self)
if copy:
X = X.copy()
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
copy = copy if copy is not None else self.copy
if sp.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sp.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
inplace_csr_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
def normalize(X, norm='l2', axis=1, copy=True):
"""Normalize a dataset along any axis
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_arrays(X, sparse_format=sparse_format, copy=copy)[0]
warn_if_not_float(X, 'The normalize function')
if axis == 0:
X = X.T
if sp.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)[:, np.newaxis]
norms[norms == 0.0] = 1.0
elif norm == 'l2':
norms = np.sqrt(np.sum(X ** 2, axis=1))[:, np.newaxis]
norms[norms == 0.0] = 1.0
X /= norms
if axis == 0:
X = X.T
return X
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Parameters
----------
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
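    Examples
    --------
    A minimal usage sketch (shown without expected output):
    >>> import numpy as np
    >>> from sklearn.preprocessing import Normalizer
    >>> X = np.array([[4., 3.], [0., 2.]])
    >>> X_normalized = Normalizer(norm='l2').fit(X).transform(X)
    >>> # each row now has unit l2 norm, e.g. [4., 3.] -> [0.8, 0.6]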
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
The lower bound that triggers feature values to be replaced by 1.0.
copy : boolean, optional, default is True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_arrays(X, sparse_format='csr', copy=copy)[0]
if sp.issparse(X):
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
# FIXME: if enough values became 0, it may be worth changing
# the sparsity structure
X.data[not_cond] = 0
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
The default threshold is 0.0 so that any non-zero values are set to 1.0
and zeros are left untouched.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
    feature rather than a quantified number of occurrences, for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modeled using the Bernoulli
distribution in a Bayesian setting).
Parameters
----------
threshold : float, optional (0.0 by default)
The lower bound that triggers feature values to be replaced by 1.0.
copy : boolean, optional, default is True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
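    Examples
    --------
    A minimal usage sketch (shown without expected output):
    >>> import numpy as np
    >>> from sklearn.preprocessing import Binarizer
    >>> X = np.array([[1., -1., 2.], [0., 0., 3.]])
    >>> X_binary = Binarizer(threshold=0.0).fit(X).transform(X)
    >>> # values strictly greater than 0.0 become 1.0, the rest become 0.0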
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
def _is_label_indicator_matrix(y):
return hasattr(y, "shape") and len(y.shape) == 2
def _is_multilabel(y):
# the explicit check for ndarray is for forward compatibility; future
# versions of Numpy might want to register ndarray as a Sequence
return not isinstance(y[0], np.ndarray) and isinstance(y[0], Sequence) \
and not isinstance(y[0], basestring) \
or _is_label_indicator_matrix(y)
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Attributes
----------
`classes_`: array of shape [n_class]
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6])
array([0, 0, 1, 2])
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"])
array([2, 2, 1])
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelNormalizer was not fitted yet.")
def fit(self, y):
"""Fit label normalizer
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
self : returns an instance of self.
"""
self.classes_ = np.unique(y)
return self
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
y_new = np.zeros(len(y), dtype=int)
for i, k in enumerate(self.classes_[1:]):
y_new[y == k] = i + 1
return y_new
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
y = np.asarray(y)
y_new = np.zeros(len(y), dtype=self.classes_.dtype)
for i, k in enumerate(self.classes_):
y_new[y == i] = k
return y_new
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Parameters
----------
neg_label: int (default: 0)
Value with which negative labels must be encoded.
pos_label: int (default: 1)
Value with which positive labels must be encoded.
Attributes
----------
`classes_`: array of shape [n_class]
Holds the label for each class.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.]])
>>> lb.fit_transform([(1, 2), (3,)])
array([[ 1., 1., 0.],
[ 0., 0., 1.]])
>>> lb.classes_
array([1, 2, 3])
"""
def __init__(self, neg_label=0, pos_label=1):
if neg_label >= pos_label:
raise ValueError("neg_label must be strictly less than pos_label.")
self.neg_label = neg_label
self.pos_label = pos_label
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
self : returns an instance of self.
"""
self.multilabel = _is_multilabel(y)
if self.multilabel:
self.indicator_matrix_ = _is_label_indicator_matrix(y)
if self.indicator_matrix_:
self.classes_ = np.arange(y.shape[1])
else:
self.classes_ = np.array(sorted(set.union(*map(set, y))))
else:
self.classes_ = np.unique(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
Y : numpy array of shape [n_samples, n_classes]
"""
self._check_fitted()
if self.multilabel or len(self.classes_) > 2:
if _is_label_indicator_matrix(y):
# nothing to do as y is already a label indicator matrix
return y
Y = np.zeros((len(y), len(self.classes_)))
else:
Y = np.zeros((len(y), 1))
Y += self.neg_label
y_is_multilabel = _is_multilabel(y)
if y_is_multilabel and not self.multilabel:
raise ValueError("The object was not " +
"fitted with multilabel input!")
elif self.multilabel:
if not _is_multilabel(y):
raise ValueError("y should be a list of label lists/tuples,"
"got %r" % (y,))
# inverse map: label => column index
imap = dict((v, k) for k, v in enumerate(self.classes_))
for i, label_tuple in enumerate(y):
for label in label_tuple:
Y[i, imap[label]] = self.pos_label
return Y
else:
y = np.asarray(y)
if len(self.classes_) == 2:
Y[y == self.classes_[1], 0] = self.pos_label
return Y
elif len(self.classes_) >= 2:
for i, k in enumerate(self.classes_):
Y[y == k, i] = self.pos_label
return Y
else:
# Only one class, returns a matrix with all negative labels.
return Y
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array of shape [n_samples, n_classes]
Target values.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
half = (self.pos_label - self.neg_label) / 2.0
threshold = self.neg_label + half
if self.multilabel:
Y = np.array(Y > threshold, dtype=int)
# Return the predictions in the same format as in fit
if self.indicator_matrix_:
# Label indicator matrix format
return Y
else:
# Lists of tuples format
return [tuple(self.classes_[np.flatnonzero(Y[i])])
for i in range(Y.shape[0])]
if len(Y.shape) == 1 or Y.shape[1] == 1:
y = np.array(Y.ravel() > threshold, dtype=int)
else:
y = Y.argmax(axis=1)
return self.classes_[y]
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
This is equivalent to centering phi(X) with
sklearn.preprocessing.Scaler(with_std=False).
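    In matrix form (the standard double-centering formula), with 1_n denoting
    the n x n matrix whose entries are all 1/n, the centered kernel is
    K_centered = K - 1_n K - K 1_n + 1_n K 1_n.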
"""
def fit(self, K):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, copy=True):
"""Center kernel
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
|
bsd-3-clause
|
zorojean/scikit-learn
|
examples/linear_model/plot_sgd_comparison.py
|
167
|
1659
|
"""
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
|
bsd-3-clause
|
chreman/visualizations
|
dictionaries/dictionaries.py
|
2
|
2209
|
# main.py
import pandas as pd
from bokeh.layouts import column, row
from bokeh.plotting import Figure, show
from bokeh.embed import standalone_html_page_for_models
from bokeh.models import ColumnDataSource, HoverTool, HBox, VBox
from bokeh.models.widgets import Slider, Select, TextInput, RadioGroup, Paragraph, Div
from bokeh.io import curdoc, save
from bokeh.charts import HeatMap, bins, output_file, vplot, TimeSeries, Line
from bokeh.models import FixedTicker, SingleIntervalTicker, ColumnDataSource, DataRange1d
from bokeh.layouts import widgetbox
from bokeh.layouts import gridplot
import bokeh.palettes as palettes
from bokeh.resources import INLINE, CDN
import pickle
import gzip
with gzip.open("../data/dist_features.pklz", "rb") as infile:
dist = pickle.load(infile)
dist.index=pd.to_datetime(dist.index)
dist = dist.groupby(pd.TimeGrouper(freq="D")).sum()
if len(dist) > 60:
dist = dist.groupby(pd.TimeGrouper(freq="M")).sum()
if len(dist) > 24:
dist = dist.groupby(pd.TimeGrouper(freq="A")).sum()
share = (dist.T / dist.sum(axis=1)).T
#dist["date"] = dist.index
dictionaries = sorted(dist.columns.tolist())
resources = INLINE
colors=palettes.Paired10
ts_abs = TimeSeries(dist, tools="pan,wheel_zoom,reset,save", active_scroll='wheel_zoom',
width=800, height=400,
title='Frequencies of dictionaries - absolute counts', legend=True)
ts_abs.legend.orientation = "horizontal"
ts_abs.legend.location = "top_center"
ts_abs.legend.background_fill_alpha = 0
ts_abs.legend.border_line_alpha = 0
ts_abs.tools[2].reset_size=False
ts_share = TimeSeries(share, tools="pan,wheel_zoom,reset,save", active_scroll='wheel_zoom',
width=800, height=400,
title='Frequencies of dictionaries - relative share', legend=True)
ts_share.x_range = ts_abs.x_range
ts_share.legend.orientation = "horizontal"
ts_share.legend.location = "top_center"
ts_share.legend.background_fill_alpha = 0
ts_share.legend.border_line_alpha = 0
ts_share.tools[2].reset_size=False
### LAYOUT
layout = column(ts_abs, ts_share)
curdoc().add_root(layout)
curdoc().title = "Exploring aggregated counts of facts over dictionaries"
|
mit
|
roim/PyTranscribe
|
plotting/tonguing.py
|
1
|
2729
|
# Copyright 2015 Rodrigo Roim Ferreira
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
""" Contains functions to plot graphs related to tonguing detection. """
import matplotlib.pyplot as _pl
import numpy as _np
import soundfiles as _sf
import tonguing as _tong
def plot_tonguing(audiopath, title="", duration=3, plotpath=None):
""" Plots a visual representation of the tonguing detection algorithm. """
samplerate, samples = _sf.readfile(audiopath)
if samples.size/samplerate < 3:
raise Exception("Input too short")
samples = samples[0:samplerate*duration]
envelope = _tong._envelope(samples)
smooth = _tong._exponential_smoothing(envelope, x_s0=_np.mean(samples[0:50]))
f, (ax0, ax1, ax2, ax3) = _pl.subplots(4, sharex=True)
ax0.plot(samples)
ax1.plot(_np.abs(samples))
ax2.plot(envelope)
ax3.plot(smooth)
ax0.set_title(title)
xlocs = _np.float32([samplerate*i/2 for i in range(2*duration + 1)])
ax3.set_xlabel("Time (s)")
ax3.set_xlim([0, _np.max(xlocs)])
ax3.set_xticks(xlocs)
ax3.set_xticklabels(["%.2f" % (l/samplerate) for l in xlocs])
ax0.set_ylabel("Signal")
ax1.set_ylabel("Signal (Absolute)")
ax2.set_ylabel("Hilbert Envelope")
ax3.set_ylabel("Smoothed Envelope")
if plotpath:
_pl.savefig(plotpath, bbox_inches="tight")
else:
_pl.show()
_pl.clf()
return
def plot_amplitude(audiopath, title="", duration=3, plotpath=None):
""" Plots the amplitude of an audio signal over time. """
samplerate, samples = _sf.readfile(audiopath)
if samples.size/samplerate < 3:
raise Exception("Input too short")
samples = samples[0:samplerate*duration]
_pl.figure(figsize=(10, 3))
_pl.plot(samples)
_pl.title(title)
xlocs = _np.float32([samplerate*i/2 for i in range(2*duration + 1)])
_pl.xlabel("Time (s)")
_pl.xlim([0, _np.max(xlocs)])
_pl.xticks(xlocs, ["%.2f" % (l/samplerate) for l in xlocs])
_pl.ylabel("Amplitude")
if plotpath:
_pl.savefig(plotpath, bbox_inches="tight")
else:
_pl.show()
_pl.clf()
return
|
gpl-3.0
|
mne-tools/mne-tools.github.io
|
0.14/_downloads/plot_compute_rt_average.py
|
7
|
1912
|
"""
========================================================
Compute real-time evoked responses using moving averages
========================================================
This example demonstrates how to connect to an MNE Real-time server
using the RtClient and use it together with RtEpochs to compute
evoked responses using moving averages.
Note: The MNE Real-time server (mne_rt_server), which is part of mne-cpp,
has to be running on the same computer.
"""
# Authors: Martin Luessi <[email protected]>
# Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.realtime import RtEpochs, MockRtClient
print(__doc__)
# Fiff file to simulate the realtime client
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# select gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
# select the left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
# create the mock-client object
rt_client = MockRtClient(raw)
# create the real-time epochs object
rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
decim=1, reject=dict(grad=4000e-13, eog=150e-6))
# start the acquisition
rt_epochs.start()
# send raw buffers
rt_client.send_data(rt_epochs, picks, tmin=0, tmax=150, buffer_size=1000)
for ii, ev in enumerate(rt_epochs.iter_evoked()):
print("Just got epoch %d" % (ii + 1))
ev.pick_types(meg=True, eog=False) # leave out the eog channel
if ii == 0:
evoked = ev
else:
evoked = mne.combine_evoked([evoked, ev], weights='nave')
plt.clf() # clear canvas
evoked.plot(axes=plt.gca()) # plot on current figure
plt.pause(0.05)
|
bsd-3-clause
|
yogo1212/RIOT
|
tests/pkg_tensorflow-lite/mnist/generate_digit.py
|
19
|
1164
|
#!/usr/bin/env python3
"""Generate a binary file from a sample image of the MNIST dataset.
Pixels of the sample are stored as uint8; images have size 28x28.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
_, (mnist_test, _) = mnist.load_data()
data = mnist_test[args.index]
data = data.astype('uint8')
output_path = os.path.join(SCRIPT_DIR, args.output)
np.ndarray.tofile(data, output_path)
if args.no_plot is False:
plt.gray()
plt.imshow(data.reshape(28, 28))
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--index", type=int, default=0,
help="Image index in MNIST test dataset")
parser.add_argument("-o", "--output", type=str, default='digit',
help="Output filename")
parser.add_argument("--no-plot", default=False, action='store_true',
help="Disable image display in matplotlib")
main(parser.parse_args())
|
lgpl-2.1
|
sequana/sequana
|
doc/module_example.py
|
1
|
1895
|
from numpy import random
import pandas as pd
from sequana.modules_report.base_module import SequanaBaseModule
from sequana.utils.datatables_js import DataTable
class MyModule(SequanaBaseModule):
def __init__(self, df, output="mytest.html"):
super().__init__()
self.data = df
self.summary = self.data.describe().to_frame()
self.title = "Super Module"
self.create_report_content()
self.create_html(output)
def create_report_content(self):
self.sections = list()
self.add_table()
self.add_image()
def add_table(self):
df = self.summary.copy()
df.columns = ['data']
df['url'] = ['http://sequana.readthedocs.org'] * len(df)
table = DataTable(df, "table", index=True)
table.datatable.datatable_options = {
'scrollX': '300px',
'pageLength': 15,
'scrollCollapse': 'true',
'dom': 'tB',
"paging": "false",
'buttons': ['copy', 'csv']}
table.datatable.set_links_to_column('url', 'data')
js = table.create_javascript_function()
html_tab = table.create_datatable(float_format='%.3g')
html = "{} {}".format(html_tab, js)
self.sections.append({
"name": "Table",
"anchor": "table",
"content": html
})
def add_image(self):
import pylab
def plotter(filename):
pylab.ioff()
self.data.hist()
pylab.savefig(filename)
html = self.create_embedded_png(plotter, "filename",
style='width:65%')
self.sections.append({
"name": "Image",
"anchor": "table",
"content": html
})
# Let us create some data.
df = pd.Series(random.randn(10000))
# and pass it as a first argument.
MyModule(df, "report_example.html")
|
bsd-3-clause
|
drpjm/udacity-mle-project1
|
boston_housing.py
|
1
|
7856
|
"""Load the Boston dataset and examine its target (label) distribution."""
# (c) 2015 Patrick Martin and Udacity
# MIT License
# Load libraries
import numpy as np
import pylab as pl
import sklearn as skl
from sklearn import datasets
from sklearn.tree import DecisionTreeRegressor
################################
### ADD EXTRA LIBRARIES HERE ###
################################
from sklearn.grid_search import GridSearchCV
# explicitly import the submodules accessed below through the `skl.` alias
from sklearn import cross_validation, metrics
def load_data():
"""Load the Boston dataset."""
boston = datasets.load_boston()
return boston
def explore_city_data(city_data):
"""Calculate the Boston housing statistics."""
# Get the labels and features from the housing data
housing_prices = city_data.target
housing_features = city_data.data
###################################
### Step 1. YOUR CODE GOES HERE ###
###################################
# Please calculate the following values using the Numpy library
# Size of data (number of houses)?
num_houses = housing_prices.size
print "# houses = " + str(num_houses)
# Number of features?
num_features = housing_features[1].size
print "# house features = " + str(num_features)
# Minimum price?
min_house_price = np.amin(housing_prices)
print "min home price = " + str(min_house_price)
# Maximum price?
max_house_price = np.amax(housing_prices)
print "max home price = " + str(max_house_price)
# Calculate mean price?
mean_home_price = np.mean(housing_prices)
print "mean home price = " + str(mean_home_price)
# Calculate median price?
median_home_price = np.median(housing_prices)
print "median home price = " + str(median_home_price)
# Calculate standard deviation?
std_dev = np.std(housing_prices)
print "std dev prices = " + str(std_dev)
def performance_metric(label, prediction):
"""Calculate and return the appropriate error performance metric."""
###################################
### Step 2. YOUR CODE GOES HERE ###
###################################
# Perform a mean square error metric - want to penalize outliers.
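    # As a reminder, MSE = (1/n) * sum_i (label_i - prediction_i)**2; the
    # squared residual grows quadratically with the error, which is why
    # outliers are penalized more heavily than with mean absolute error.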
return skl.metrics.mean_squared_error(label, prediction)
def split_data(city_data):
"""Randomly shuffle the sample set. Divide it into 70 percent training and 30 percent testing data."""
# Get the features and labels from the Boston housing data
X, y = city_data.data, city_data.target
###################################
### Step 3. YOUR CODE GOES HERE ###
###################################
X_train, X_test, y_train, y_test = skl.cross_validation.train_test_split(X, y, test_size=0.3, random_state=42)
return X_train, X_test, y_train, y_test
def learning_curve(depth, X_train, y_train, X_test, y_test):
"""Calculate the performance of the model after a set of training data."""
# We will vary the training set size so that we have 50 different sizes
    sizes = np.linspace(1, len(X_train), 50).astype(int)  # integer sizes so they can be used as slice bounds
train_err = np.zeros(len(sizes))
test_err = np.zeros(len(sizes))
print "Decision Tree with Max Depth: "
print depth
for i, s in enumerate(sizes):
# Create and fit the decision tree regressor model
regressor = DecisionTreeRegressor(max_depth=depth)
regressor.fit(X_train[:s], y_train[:s])
# Find the performance on the training and testing set
train_err[i] = performance_metric(y_train[:s], regressor.predict(X_train[:s]))
test_err[i] = performance_metric(y_test, regressor.predict(X_test))
# Plot learning curve graph
learning_curve_graph(sizes, train_err, test_err)
def learning_curve_graph(sizes, train_err, test_err):
"""Plot training and test error as a function of the training size."""
pl.figure()
pl.title('Decision Trees: Performance vs Training Size')
pl.plot(sizes, test_err, lw=2, label = 'test error')
pl.plot(sizes, train_err, lw=2, label = 'training error')
pl.legend()
pl.xlabel('Training Size')
pl.ylabel('Error')
pl.show()
def model_complexity(X_train, y_train, X_test, y_test):
"""Calculate the performance of the model as model complexity increases."""
print "Model Complexity: "
# We will vary the depth of decision trees from 1 to 24
max_depth = np.arange(1, 25)
train_err = np.zeros(len(max_depth))
test_err = np.zeros(len(max_depth))
for i, d in enumerate(max_depth):
# Setup a Decision Tree Regressor so that it learns a tree with depth d
regressor = DecisionTreeRegressor(max_depth=d)
# Fit the learner to the training data
regressor.fit(X_train, y_train)
# Find the performance on the training set
train_err[i] = performance_metric(y_train, regressor.predict(X_train))
# Find the performance on the testing set
test_err[i] = performance_metric(y_test, regressor.predict(X_test))
# Plot the model complexity graph
model_complexity_graph(max_depth, train_err, test_err)
def model_complexity_graph(max_depth, train_err, test_err):
"""Plot training and test error as a function of the depth of the decision tree learn."""
pl.figure()
pl.title('Decision Trees: Performance vs Max Depth')
pl.plot(max_depth, test_err, lw=2, label = 'test error')
pl.plot(max_depth, train_err, lw=2, label = 'training error')
pl.legend()
pl.xlabel('Max Depth')
pl.ylabel('Error')
pl.show()
def fit_predict_model(city_data):
"""Find and tune the optimal model. Make a prediction on housing data."""
# Get the features and labels from the Boston housing data
X, y = city_data.data, city_data.target
# Setup a Decision Tree Regressor
dtr = DecisionTreeRegressor()
# print "DTR parameters"
# print dtr.get_params()
dtr_parameters = {'max_depth':(1,2,3,4,5,6,7,8,9,10)}
###################################
### Step 4. YOUR CODE GOES HERE ###
###################################
# 1. Find the best performance metric
# Referenced users guide for scoring.
mse_scorer = skl.metrics.make_scorer(skl.metrics.mean_squared_error, greater_is_better=False)
# 2. Use gridsearch to fine tune the Decision Tree Regressor and find the best model
# Referenced lecture and the GridSearch Users Guide docs.
tuned_dtr = GridSearchCV(dtr, dtr_parameters, mse_scorer)
# Fit the learner to the training data
print "Final Model: "
print tuned_dtr.fit(X, y)
print "Tuned DTR depth = " + str(tuned_dtr.best_estimator_.max_depth)
# Use the model to predict the output of a particular sample
x = [11.95, 0.00, 18.100, 0, 0.6590, 5.6090, 90.00, 1.385, 24, 680.0, 20.20, 332.09, 12.13]
# predict expects a 2D array: one row per sample
y = tuned_dtr.predict([x])
print "House: " + str(x)
print "Prediction: " + str(y)
def main():
"""Analyze the Boston housing data. Evaluate and validate the
performance of a Decision Tree regressor on the housing data.
Fine tune the model to make prediction on unseen data."""
print "Starting!"
# Load data
city_data = load_data()
# Explore the data
explore_city_data(city_data)
# Training/Test dataset split
X_train, X_test, y_train, y_test = split_data(city_data)
# print "X train shape = " + str(X_train.shape)
# print "X test shape = " + str(X_test.shape)
# print "y train shape = " + str(y_train.shape)
# print "y test shape = " + str(y_test.shape)
# Learning Curve Graphs
# max_depths = [1,2,3,4,5,6,7,8,9,10]
# for max_depth in max_depths:
# learning_curve(max_depth, X_train, y_train, X_test, y_test)
# Model Complexity Graph
# model_complexity(X_train, y_train, X_test, y_test)
# Tune and predict Model
# For report writing...
# idxs = [1,2,3,4,5]
# for i in idxs:
fit_predict_model(city_data)
if __name__ == "__main__":
main()
|
mit
|
victor-gil-sepulveda/PhD-ANMPythonHelpers
|
nma_algo_char/domain_distances_from_logs.py
|
1
|
2595
|
'''
Created on Jan 28, 2016
@author: victor
'''
from nma_algo_char.common import MetropolisMCSimulator, prepare_subplots
from nma_algo_char.acceptance_and_rmsf_from_logs import load_single_proc_data
import os.path
from nma_algo_char.mode_application_analysis import process_energy_differences
import numpy
from anmichelpers.tools.tools import norm
from optparse import OptionParser
import matplotlib.pyplot as plt
import seaborn as sns
def calc_distances(in_coords, who_is_accepted):
coords = in_coords[who_is_accepted]
distances = []
for coordset in coords:
distances.append(norm(coordset[128]-coordset[18]))
return numpy.array(distances)
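# Added note (inferred from the call in __main__ below): in_coords is expected
# to have shape (n_steps, n_atoms, 3), and who_is_accepted selects the steps to
# keep, so the result is one atom-128-to-atom-18 distance per selected set.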
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--type", dest="sim_type")
parser.add_option("-t", type="int", dest="temperature")
(options, args) = parser.parse_args()
folders = [
"IC_dispFact_0.65_dm_1",
"IC_dispFact_0.65_dm_2",
"IC_dispFact_0.65_dm_3",
"IC_dispFact_0.65_dm_4",
"IC_dispFact_0.65_dm_5",
"IC_dispFact_0.65_dm_6",
"IC_dispFact_0.65_dm_7",
"IC_dispFact_0.65_dm_8",
"IC_dispFact_0.65_dm_9",
"IC_dispFact_0.65_dm_10"
]
distances = {}
for folder in folders:
raw_data, min_len = load_single_proc_data(options.sim_type, os.path.join(folder,"info"))
energy_increments = process_energy_differences(raw_data)
mc = MetropolisMCSimulator(energy_increments)
who_is_accepted = mc.who_is_accepted(options.temperature)
coords = numpy.reshape(raw_data["coords_after"], (len(raw_data["coords_after"]),
len(raw_data["coords_after"][0])/3,
3))
distances[folder] = calc_distances(coords, range(len(coords)))#who_is_accepted[:150])
sns.set_style("whitegrid")
row_len = 4
col_len = 3
folders.extend(["IC_dispFact_0.65_dm_10","IC_dispFact_0.65_dm_10"])
f, axes = prepare_subplots(row_len, col_len)
for i,folder in enumerate(folders):
ax = axes[i/row_len, i%row_len]
ax.set_title(folder)
if i%row_len == 0:
ax.set_ylabel("Distance ($\AA$)")
if i/row_len == col_len-1:
ax.set_xlabel("Step")
ax.plot(distances[folder])
plt.suptitle("Inter domain distance")
plt.show()
|
mit
|
michaelneuder/image_quality_analysis
|
bin/calculations/scratch/SSIM_calculator.py
|
1
|
3230
|
#!/usr/bin/env python3
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from PIL import Image as im
def get_2d_list_slice(matrix, start_row, end_row, start_col, end_col):
return np.asarray([row[start_col:end_col] for row in matrix[start_row:end_row]])
def get_SSIM_window(matrix, row_center, col_center, padding):
return get_2d_list_slice(matrix, row_center-padding, row_center+padding+1, col_center-padding, col_center+padding+1)
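# Added note: row_center/col_center are given in padded-image coordinates, so
# with padding=5 this returns the 11x11 neighbourhood centred on the
# corresponding pixel of the unpadded image.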
def calculate_ssim(window_orig, window_recon):
k_1 = 0.01
k_2 = 0.03
L = 255
if window_orig.shape != (11,11) or window_recon.shape != (11,11):
raise ValueError('please check window size for SSIM calculation!')
else:
orig_data = window_orig.flatten()
recon_data = window_recon.flatten()
mean_x = np.mean(orig_data)
mean_y = np.mean(recon_data)
var_x = np.var(orig_data)
var_y = np.var(recon_data)
covar = np.cov(orig_data, recon_data)[0][1]
c_1 = (L*k_1)**2
c_2 = (L*k_2)**2
num = (2*mean_x*mean_y+c_1)*(2*covar+c_2)
den = (mean_x**2+mean_y**2+c_1)*(var_x+var_y+c_2)
return num/den
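# Illustrative sanity check (added sketch, not part of the original script):
# identical windows should score close to 1.0. It is not exactly 1.0 here
# because np.cov defaults to ddof=1 while np.var defaults to ddof=0 above.
def _ssim_sanity_check():
    rng = np.random.RandomState(0)
    window = rng.randint(0, 256, size=(11, 11)).astype(float)
    score = calculate_ssim(window, window)
    assert 0.99 < score < 1.02, score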
def main():
print('importing image files ...')
orig_images = np.loadtxt('../../data/sample_data/orig_140.txt')
recon_images = np.loadtxt('../../data/sample_data/recon_140.txt')
num_images = orig_images.shape[0]
image_dimension = int(np.sqrt(orig_images.shape[1]))
# reshape to add padding --- care must be taken as padding can mess things up
orig_images = np.reshape(orig_images, [num_images,image_dimension,image_dimension])
recon_images = np.reshape(recon_images, [num_images,image_dimension,image_dimension])
# adding padding for SSIM calcs
print('padding images ...')
padding = 5
orig_padded = []
recon_padded = []
for i in range(num_images):
orig_padded.append(np.pad(orig_images[i], pad_width=padding, mode='edge'))
recon_padded.append(np.pad(recon_images[i], pad_width=padding, mode='edge'))
orig_padded = np.asarray(orig_padded)
recon_padded = np.asarray(recon_padded)
# iterating through each pixel of original image, and get 11x11 window for calculation
print('calculating SSIM scores ...')
SSIM_scores = np.zeros(shape=(num_images, image_dimension,image_dimension))
for image in range(num_images):
print(' - image {}'.format(image))
for row in range(padding,orig_padded.shape[1]-padding):
for col in range(padding,orig_padded.shape[2]-padding):
current_window_orig = get_SSIM_window(orig_padded[image], row, col, padding)
current_window_recon = get_SSIM_window(recon_padded[image], row, col, padding)
score = calculate_ssim(current_window_orig, current_window_recon)
SSIM_scores[image ,row-padding, col-padding] = score
print('mean SSIM score = {:.4f}, std dev of SSIM scores = {:.4f}'.format(SSIM_scores.mean(), SSIM_scores.std()))
# write to file
SSIM_scores_write = np.reshape(SSIM_scores, [num_images,image_dimension**2])
np.savetxt('SSIM_{}.txt'.format(num_images), SSIM_scores_write, fmt="%.6f")
if __name__ == '__main__':
main()
|
mit
|
nhejazi/scikit-learn
|
sklearn/tests/test_random_projection.py
|
141
|
14040
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
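# Added note (illustrative): the helper returns the same random data twice,
# once as a dense ndarray of shape (n_samples, n_features) and once as the
# equivalent CSR matrix, so dense and sparse code paths see identical inputs.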
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrices should produce a transformation matrix
# with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
# Check some statistical properties of Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statistical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
|
bsd-3-clause
|
reflectometry/osrefl
|
osrefl/viewers/wxzslice.py
|
1
|
12470
|
# Copyright (C)
# All rights reserved.
# See LICENSE.txt for details.
# Author: Brian Maranville, Christopher Metting
#Starting Date:8/23/2010
from numpy import arctan2,indices,array,ma,pi,amin,amax,nan,degrees, isfinite
import matplotlib,wx
from matplotlib.widgets import RectangleSelector
from matplotlib.blocking_input import BlockingInput
from matplotlib.colors import LinearSegmentedColormap
from pylab import figure,show,imshow,draw, delaxes,cla
# Disable interactive mode so that plots are only updated on show() or draw().
# Note that the interactive function must be called before selecting a backend
# or importing pyplot, otherwise it will have no effect.
matplotlib.interactive(False)
# Specify the backend to use for plotting and import backend dependent classes.
# Note that this must be done before importing pyplot to have an effect.
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2Wx as Toolbar
# The Figure object is used to create backend-independent plot representations.
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
from pylab import get_current_fig_manager as gcfm
# Wx-Pylab magic ...
from matplotlib import _pylab_helpers
from matplotlib.backend_bases import FigureManagerBase
def momentView(omf):
app = wx.PySimpleApp()
app.Frame = MomentFrame(-1,omf)
app.Frame.Show(True)
app.MainLoop()
app.Destroy()
return()
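# Added note: momentView is a blocking call; it spins up its own wx app and
# only returns once the viewer window has been closed.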
class MomentFrame(wx.Frame):
'''
'''
def __init__(self,parent,omf):
wx.Frame.__init__(self,None,-1,"Z Slice Viewer")
self.omf = omf
self.parent = parent
self.panel = ZMomentplot(self,-1)
self.Fit()
class ZMomentplot(wx.Panel):
def __init__(self,frame,id):
wx.Panel.__init__(self,frame,id,style = wx.BORDER_RAISED)
z_layer = 0
self.omf = frame.omf
self.halfstep = (float(self.omf.parameters['zstepsize'])/2.0)*1.0e10
self.figure = Figure(frameon = True)
self.figure.set_facecolor('.82')
self.canvas = FigureCanvas(self, -1, self.figure)
fm = FigureManagerBase(self.canvas, 0)
_pylab_helpers.Gcf.set_active(fm)
self.wheel_cmap = LinearSegmentedColormap.from_list('wheel_rgby',
['red', 'green', 'blue', 'yellow', 'red'])
mpl_toolbar = Toolbar(self.canvas)
mpl_toolbar.Realize()
x,y = indices((100,100), dtype = float) - 50.
wheel_angle = arctan2(x,y)
self.ax1 = self.figure.add_axes([0.1,0.1,0.7,0.8])
self.angle = ma.array(arctan2(-self.omf.my[:,:,z_layer],
self.omf.mx[:,:,z_layer]),
mask=(self.omf.M[:,:,z_layer] == 0.0))
self.angle[self.angle==360] = 0.0
print ma.getdata(self.angle)
xmax = (float(self.omf.parameters['xnodes']) *
float(self.omf.parameters['xstepsize']) * 1.0e10)
ymax = (float(self.omf.parameters['ynodes']) *
float(self.omf.parameters['ystepsize']) * 1.0e10)
self.extent = [0., xmax, 0., ymax]
self.im = self.ax1.imshow(self.angle.T, origin='lower',
interpolation = 'nearest',
extent = self.extent,
cmap = self.wheel_cmap,
vmin = -pi, vmax = pi)
self.ax1.set_title('Z Slice = ' +
str(z_layer*float(self.omf.parameters['zstepsize'])* 1.0e10
+ self.halfstep) +' Ang' ,size = 'xx-large')
self.ax1.set_xlabel('$x (\AA)$', size = 'x-large')
self.ax1.set_ylabel('$y (\AA)$', size = 'x-large')
self.ax1w = self.figure.add_axes([0.75,0.4,0.3,0.2], polar=True)
self.ax1w.yaxis.set_visible(False)
self.ax1w.imshow(wheel_angle, cmap=self.wheel_cmap,
extent=[0,2*pi,0,pi])
self.ax1w.set_title('M direction\n(in-plane)')
self.zselect = wx.Slider(self,-1,size = [300,40],minValue = int(0),
maxValue = int(self.omf.dims[2]-1),
style = wx.SL_AUTOTICKS)
self.datavalue = wx.StatusBar(self,-1)
self.datavalue.SetStatusText('Angle Value: ')
self.label = wx.StaticText(self,-1,'Select Z layer: ')
self.minVal = wx.StaticText(self,-1, '0.0')
self.maxVal = wx.StaticText(self,-1, str(self.omf.dims[2]*
(float(self.omf.parameters['zstepsize'])* 1.0e10)
-self.halfstep))
#Sizer Creation
toolSize = wx.BoxSizer(wx.HORIZONTAL)
BotSize = wx.BoxSizer(wx.HORIZONTAL)
vertSize = wx.BoxSizer(wx.VERTICAL)
BotSize.Add(self.label,0,wx.LEFT|wx.RIGHT,border = 5)
BotSize.Add(self.minVal,0,wx.LEFT|wx.RIGHT,border = 5)
BotSize.Add(self.zselect,0,wx.TOP|wx.LEFT|wx.RIGHT,border = 5)
BotSize.Add(self.maxVal,0,wx.LEFT|wx.RIGHT,border = 5)
toolSize.Add(mpl_toolbar,0,wx.RIGHT,border = 20)
toolSize.Add(self.datavalue,0,wx.LEFT|wx.RIGHT|wx.TOP,border = 20)
vertSize.Add(self.canvas,-1,wx.EXPAND|wx.LEFT|wx.RIGHT,border = 5)
vertSize.Add(toolSize,0)
vertSize.Add(BotSize,0,wx.ALL, border = 10)
self.SetSizer(vertSize)
self.Fit()
self.zselect.Bind(wx.EVT_SCROLL,self.newMomentZ)
self.canvas.mpl_connect('motion_notify_event',self.onMouseOver)
def newMomentZ(self,event):
z_layer = self.zselect.GetValue()
Zvalue = str(float(self.omf.parameters['zstepsize'])*z_layer)
self.ax1.set_title('Moment at Z = '+ str(Zvalue))
self.angle = ma.array(arctan2(-self.omf.my[:,:,z_layer],
self.omf.mx[:,:,z_layer]),
mask=(self.omf.M[:,:,z_layer] == 0))
self.im.set_data(self.angle.T)
self.ax1.set_title('Z Slice = ' +
str(z_layer*float(self.omf.parameters['zstepsize'])* 1.0e10
+ self.halfstep) + ' Ang',size = 'xx-large')
draw()
def onMouseOver(self, event):
"""
"""
if event.inaxes == self.ax1:
if (event.xdata != None and event.ydata != None):
xidx = (int(event.xdata/
(float(self.omf.parameters['xstepsize'])*1.0e10)))
yidx = (int(event.ydata/
(float(self.omf.parameters['ystepsize'])*1.0e10)))
value = self.angle[xidx,yidx]
if ma.getmask(self.angle)[xidx,yidx]:
self.datavalue.SetStatusText('Angle Value: MASKED')
else:
value = -degrees(value)
if (value < 0.0): value +=360
self.datavalue.SetStatusText('Angle Value: '
+str('%.2f'%value))
else:
self.datavalue.SetLabel('')
return
def unitView(v,extent = None, step = None, n = None):
'''
**Overview:**
This method is used to plot an array with a slicing option in the z
direction. The plotter contains a slider bar which allows the user to
scroll through the different layers and view the x-y slices. This method
is more reliable and less expensive than the mlab.contour3d method
which is still included in the software functionality.
**Parameters:**
*v:* (float:3D array|angstroms^-2)
This is the 3D array which will be plotted. It is generally used for
viewing the unit cell SLD object, however may be extended to other
uses.
'''
from numpy import shape,array
dim = shape(array(v))
print dim
if extent is None:
extent = array([[0,dim[0]],[0,dim[1]],[0,dim[2]]])
if step is None:
step = [1,1,1]
if n is None:
n = dim
app = wx.PySimpleApp()
app.Frame = unitFrame(-1,v,extent,step,n)
app.Frame.Show(True)
app.MainLoop()
app.Destroy()
return()
class unitFrame(wx.Frame):
'''
'''
def __init__(self,parent,v,extent,step,n):
wx.Frame.__init__(self,None,-1,"Z Slice Viewer")
self.v = v
self.extent = extent
self.step = step
self.n = n
self.parent = parent
self.panel = ZUnitPlot(self,-1)
self.Fit()
class ZUnitPlot(wx.Panel):
def __init__(self,frame,id):
wx.Panel.__init__(self,frame,id,style = wx.BORDER_RAISED)
self.v = frame.v
self.extent = frame.extent
self.step = frame.step
self.n = frame.n
z_layer = 0
self.halfstep = self.step[2]/2.0
self.figure = Figure(frameon = True)
self.figure.set_facecolor('.82')
self.canvas = FigureCanvas(self, -1, self.figure)
fm = FigureManagerBase(self.canvas, 0)
_pylab_helpers.Gcf.set_active(fm)
self.ax1 = self.figure.add_axes([0.1,0.1,0.7,0.8])
plotExtent = ([self.extent[0,0],self.extent[0,1],
self.extent[1,0],self.extent[1,1]])
self.im = self.ax1.imshow(self.v[:,:,z_layer].T,
origin='lower',
interpolation = 'nearest',
extent = plotExtent,
vmin = amin(self.v),
vmax = amax(self.v))
self.ax1.set_title('Z Slice = ' +
str(z_layer *self.step[2] + self.halfstep) +
' Ang' ,size = 'xx-large')
self.figure.colorbar(self.im,format = '%.2e')
mpl_toolbar = Toolbar(self.canvas)
mpl_toolbar.Realize()
self.zselect = wx.Slider(self,-1,size = [300,40],minValue = int(0),
maxValue = int(self.n[2]-1),
style = wx.SL_AUTOTICKS)
print self.extent[2,1]
print self.halfstep
print 'TEST', str(self.extent[2,1] - self.halfstep)
self.label = wx.StaticText(self,-1,'Select Z layer: ')
self.minVal = wx.StaticText(self,-1, '0.0')
self.maxVal = wx.StaticText(self,-1, str(self.extent[2,1]
- self.halfstep))
BotSize = wx.BoxSizer(wx.HORIZONTAL)
vertSize = wx.BoxSizer(wx.VERTICAL)
BotSize.Add(self.label,0,wx.LEFT|wx.RIGHT,border = 5)
BotSize.Add(self.minVal,0,wx.LEFT|wx.RIGHT,border = 5)
BotSize.Add(self.zselect,0,wx.TOP|wx.LEFT|wx.RIGHT,border = 5)
BotSize.Add(self.maxVal,0,wx.LEFT|wx.RIGHT,border = 5)
vertSize.Add(self.canvas,-1,wx.EXPAND|wx.LEFT|wx.RIGHT)
vertSize.Add(mpl_toolbar,0)
vertSize.Add(BotSize,0,wx.ALL, border = 10)
self.SetSizer(vertSize)
self.Fit()
self.zselect.Bind(wx.EVT_SCROLL,self.newUnitZ)
def newUnitZ(self,event):
z_layer = self.zselect.GetValue()
Zvalue = str(float(self.step[2])*z_layer)
self.ax1.set_title('Moment at Z = '+ str(Zvalue))
newData = self.v[:,:,z_layer]
self.im.set_data(newData.T)
self.ax1.set_title('Z Slice = ' +
str(z_layer *self.step[2] + self.halfstep) +
' Ang' ,size = 'xx-large')
draw()
def _test():
from omfLoader import *
from numpy import asarray
mag = Omf('/home/mettingc/Documents/BBM/c_500mTz_-Oxs_MinDriver-Magnetization-05-0005651.omf')
#mag = Omf('/home/mettingc/Documents/test.omf')
momentView(mag)
import sample_prep
Au = (sample_prep.Parallelapiped(SLD = 4.506842e-6,
Ms = 8.6e5,dim=[5.0e4,5.0e4,2.0e4]))
Cr = (sample_prep.Layer(SLD = 3.01e-6,Ms = 8.6e5,
thickness_value = 1000.0))
#Au.on_top_of(Cr)
scene = sample_prep.Scene([Au])
GeoUnit = (sample_prep.GeomUnit(Dxyz = [10.0e4,10.0e4,2.2e4],
n = [20,25,20],scene = scene))
cell = GeoUnit.buildUnit()
print cell.step
unitView(cell.unit,asarray([[0,cell.Dxyz[0]],[0,cell.Dxyz[1]],[0,cell.Dxyz[2]]]),cell.step,cell.n)
if __name__=="__main__":_test()
|
bsd-3-clause
|
nmetts/sp2016-csci7000-bda-project
|
classify.py
|
1
|
25742
|
'''
Created on Mar 5, 2016
@author: Nicolas Metts
'''
import argparse
import csv
from sklearn import grid_search
from sklearn import cross_validation
from sklearn.cross_validation import KFold
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble.forest import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Perceptron, SGDClassifier
from sklearn.linear_model.passive_aggressive import PassiveAggressiveClassifier
from sklearn.metrics import precision_score, recall_score, roc_auc_score
from sklearn.metrics.classification import accuracy_score
from sklearn.preprocessing import RobustScaler, StandardScaler
from sklearn.svm import SVC
from sklearn.tree.tree import DecisionTreeClassifier
from unbalanced_dataset.over_sampling import SMOTE
from unbalanced_dataset.pipeline import SMOTEENN
from unbalanced_dataset.pipeline import SMOTETomek
from unbalanced_dataset.under_sampling import UnderSampler, ClusterCentroids, NearMiss, TomekLinks
from adasyn import ADASYN
import numpy as np
# Constants for classifier names
LOG_REG = 'log_reg'
SVM = 'svm'
ADA_BOOST = 'ada_boost'
GRADIENT_BOOST = 'gradient_boost'
RF = 'random_forest'
EXTRA_TREES = 'extra_trees'
BAGGING = 'bagging'
PASSIVE_AGGRESSIVE = 'passive_aggressive'
PERCEPTRON = 'perceptron'
# Constants for sampling techniques
SMOTE_REG = "smote"
SMOTE_SVM = "smote_svm"
SMOTE_BORDERLINE_1 = "smote_borderline_1"
SMOTE_BORDERLINE_2 = "smote_borderline_2"
SMOTE_ENN = "smote_enn"
SMOTE_TOMEK = "smote_tomek"
UNDERSAMPLER = "undersampler"
TOMEK_LINKS = "tomlek_links"
CLUSTER_CENTROIDS = "cluster_centroids"
NEARMISS = "near_miss"
ADASYN_SAMPLER = "adasyn"
class ClassifyArgs(object):
"""
A class to represent the arguments used in classify
"""
def __init__(self, data_file="../Data/orange_small_train.data",
train_file="../Data/orange_small_train.data",
test_file="../Data/orange_small_train.data",classify=False,
classifiers=None, kernel='rbf', cross_validate=False,
write_to_log=False, features=None, scale=False,
kfold=False, write_predictions=False, grid_search=False):
self.data_file = data_file
self.train_file = train_file
self.test_file = test_file
self.classify = classify
if classifiers is None:
self.classifiers = ['svm']
else:
self.classifiers = classifiers
self.kernel = kernel
self.cross_validate = cross_validate
self.write_to_log = write_to_log
if features is None:
self.features = []
else:
self.features = features
self.scale = scale
self.kfold = kfold
self.write_predictions = write_predictions
def __repr__(self):
str_list = [self.data_file, self.train_file, self.test_file,
self.classify, self.kernel, self.cross_validate, self.scale,
self.kfold]
str_list += self.features
return "_".join([str(x) for x in str_list])
def write_log(out_file_name, args, classifier, precision, recall,
true_count, actual_count, X_train, X_test, auc, accuracy,
probablistic_prediction, prediction_threshold):
"""
Function to write results of a run to a file.
"""
# Get the kernel type if classifier is SVM, otherwise just put NA
get_kernel = lambda x: x == 'svm' and args.kernel or "NA"
# Log important info
log = [args.data_file, args.train_file, args.test_file,
classifier, get_kernel(classifier), args.scale, len(X_train),
len(X_test), precision, recall, accuracy, true_count, actual_count,
auc, args.sampling_technique, args.sampling_ratio, args.select_best]
with open(out_file_name, 'a') as f:
out_writer = csv.writer(f, lineterminator='\n')
out_writer.writerow(log)
def __print_and_log_results(clf, classifier, x_train, x_test, y_test, out_file_name,
args):
probablistic_predictions = False
if args.predict_proba:
predict_proba_func = getattr(clf, "predict_proba", None)
if predict_proba_func is not None:
probablistic_predictions = True
prob_predictions = clf.predict_proba(x_test)
predictions = []
pos_predictions = []
for prediction in prob_predictions:
pos_predictions.append(prediction[1])
if prediction[1] > args.predict_threshold:
predictions.append(1)
else:
predictions.append(-1)
pos_predictions = np.array(pos_predictions)
mean_confidence = np.mean(pos_predictions)
max_confidence = max(pos_predictions)
min_confidence = min(pos_predictions)
print "Mean confidence: " + str(mean_confidence)
print "Max confidence: " + str(max_confidence)
print "Min confidence: " + str(min_confidence)
predictions = np.array(predictions)
else:
predictions = clf.predict(x_test)
else:
predictions = clf.predict(x_test)
precision = precision_score(y_test, predictions, [-1, 1])
recall = recall_score(y_test, predictions, [-1, 1])
auc_score = roc_auc_score(y_test, predictions, None)
accuracy = accuracy_score(y_test, predictions)
print "Train/test set sizes: " + str(len(x_train)) + "/" + str(len(x_test))
print "Precision is: " + str(precision)
print "Recall is: " + str(recall)
print "AUC ROC Score is: " + str(auc_score)
print "Accuracy is: " + str(accuracy)
true_count = len([1 for p in predictions if p == 1])
actual_count = len([1 for y in y_test if y == 1])
print "True count (prediction/actual): " + str(true_count) + "/" + str(actual_count)
if args.write_to_log:
# Write out results as a table to log file
write_log(out_file_name=out_file_name, args=args, classifier=classifier,
precision=precision, recall=recall,
true_count=true_count, actual_count=actual_count,
X_train=x_train, X_test=x_test,
auc=auc_score, accuracy=accuracy,
probablistic_prediction=probablistic_predictions,
prediction_threshold=args.predict_threshold)
def __get_sample_transformed_examples(sample_type, train_x, train_y, ratio):
sampler = None
verbose = True
if sample_type == SMOTE_REG:
sampler = SMOTE(kind='regular', verbose=verbose, ratio=ratio, k=15)
elif sample_type == SMOTE_SVM:
# TODO: Make this configurable?
svm_args = {'class_weight' : 'balanced'}
sampler = SMOTE(kind='svm', ratio=ratio, verbose=verbose, k=15, **svm_args)
elif sample_type == SMOTE_BORDERLINE_1:
sampler = SMOTE(kind='borderline1', ratio=ratio, verbose=verbose)
elif sample_type == SMOTE_BORDERLINE_2:
sampler = SMOTE(kind='borderline2', ratio=ratio, verbose=verbose)
elif sample_type == SMOTE_ENN:
sampler = SMOTEENN(ratio=ratio, verbose=verbose, k=15)
elif sample_type == SMOTE_TOMEK:
sampler = SMOTETomek(ratio=ratio,verbose=verbose, k=15)
elif sample_type == UNDERSAMPLER:
sampler = UnderSampler(ratio=ratio, verbose=verbose, replacement=False,
random_state=17)
elif sample_type == ADASYN_SAMPLER:
sampler = ADASYN(k=15,imb_threshold=0.6, ratio=ratio)
elif sample_type == TOMEK_LINKS:
sampler = TomekLinks()
elif sample_type == CLUSTER_CENTROIDS:
sampler = ClusterCentroids(ratio=ratio)
elif sample_type == NEARMISS:
sampler = NearMiss(ratio=ratio)
else:
print "Unrecoqnized sample technique: " + sample_type
print "Returning original data"
return train_x, train_y
return sampler.fit_transform(train_x, train_y)
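# Added usage sketch (illustrative; ratio semantics follow the samplers
# imported from unbalanced_dataset above):
#
#     x_bal, y_bal = __get_sample_transformed_examples(SMOTE_REG,
#                                                      x_train, y_train, 1.0)
#
# The rebalanced split is then fed to the classifier exactly like the
# original x_train/y_train.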
def __get_classifier_model(classifier, args):
"""
Convenience function for obtaining a classification model
Args:
classifier(str): A string indicating the name of the classifier
args: An arguments object
Returns:
A classification model based on the given classifier string
"""
# Make SGD Logistic Regression model the default
model = SGDClassifier(loss='log', penalty='l2', shuffle=True, n_iter=5,
n_jobs=-1, random_state=179)
if classifier == SVM:
model = SVC(kernel=args.kernel, class_weight="balanced", cache_size=8096,
random_state=17, probability=True)
elif classifier == ADA_BOOST:
dt = DecisionTreeClassifier(max_depth=15, criterion='gini',
max_features='auto', class_weight='balanced',
random_state=39)
model = AdaBoostClassifier(base_estimator=dt, n_estimators=400, random_state=17)
elif classifier == RF:
# Configure the classifier to use all available CPU cores
model = RandomForestClassifier(class_weight="balanced", n_jobs=-1,
n_estimators=400, random_state=17,
max_features='auto', max_depth=15,
criterion='gini')
elif classifier == GRADIENT_BOOST:
model = GradientBoostingClassifier(random_state=17, n_estimators=400,
max_features='auto')
elif classifier == EXTRA_TREES:
model = ExtraTreesClassifier(random_state=17, n_estimators=400, n_jobs=-1,
class_weight='balanced', max_depth=15,
max_features='auto', criterion='gini')
elif classifier == BAGGING:
dt = DecisionTreeClassifier(max_depth=15, criterion='gini',
max_features='auto', class_weight='balanced',
random_state=39)
model = BaggingClassifier(base_estimator=dt, n_estimators=400,
random_state=17, n_jobs=-1, max_features=0.8,
max_samples=0.8, bootstrap=False)
elif classifier == PASSIVE_AGGRESSIVE:
model = PassiveAggressiveClassifier(n_iter=10, class_weight='balanced',
n_jobs=-1, random_state=41)
elif classifier == PERCEPTRON:
model = Perceptron(n_jobs=-1, n_iter=10, penalty='l2',
class_weight='balanced', alpha=0.25)
return model
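# Added usage sketch (illustrative; assumes an argparse-style args object
# exposing a 'kernel' attribute, as built in __main__ below):
#
#     model = __get_classifier_model(RF, args)
#     clf = model.fit(x_train, y_train)
#     predictions = clf.predict(x_test)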
def main(args):
out_file_name = "results.log"
if args.classify:
# Cast to list to keep it all in memory
train = list(csv.reader(open(args.train_file, 'r')))
test = list(csv.reader(open(args.test_file, 'r')))
x_train = np.array(train[1:], dtype=float)
x_test = np.array(test[1:], dtype=float)
train_labels_file = open(args.train_labels)
y_train = np.array([int(x.strip()) for x in train_labels_file.readlines()])
test_labels_file = open(args.test_labels)
y_test = np.array([int(x.strip()) for x in test_labels_file.readlines()])
train_labels_file.close()
test_labels_file.close()
if args.sampling_technique:
print "Attempting to use sampling technique: " + args.sampling_technique
if np.isnan(args.sampling_ratio):
print "Unable to use sampling technique. Ratio is NaN."
else:
x_train, y_train = __get_sample_transformed_examples(args.sampling_technique,
x_train, y_train,
args.sampling_ratio)
if args.scale:
scaler = RobustScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.fit_transform(x_test)
for classifier in args.classifiers:
model = __get_classifier_model(classifier, args)
print "Using classifier " + classifier
print "Fitting data to model"
if args.grid_search:
print "Applying parameter tuning to model"
if classifier == LOG_REG:
parameters = {'loss':('log','hinge'), 'penalty':('l2', 'l1'), 'shuffle':[True], 'n_iter':[5], 'n_jobs':[-1], 'random_state':[179]}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
elif classifier == SVM:
parameters = {'kernel':('rbf', 'poly'), 'cache_size':[8096], 'random_state':[17]}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
elif classifier == ADA_BOOST:
parameters = {'n_estimators':[300], 'random_state':[13]}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
elif classifier == RF:
parameters = {'criterion':('gini', 'entropy'), 'n_jobs':[-1], 'n_estimators':[300], 'random_state':[17]}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
elif classifier == GRADIENT_BOOST:
parameters = {'n_estimators':[300], 'random_state':[17]}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
elif classifier == EXTRA_TREES:
parameters = {'n_estimators':[300], 'random_state':[17], 'n_jobs':[-1], 'criterion':('gini', 'entropy'), 'max_features':['log2', 40, 0.4], 'bootstrap':[True, False]}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
elif classifier == BAGGING:
parameters = {'n_estimators':[300], 'random_state':[17], 'max_samples': [.4, 30],'max_features':[40, 0.4], 'bootstrap':[True, False], 'bootstrap_features':[True, False], 'n_jobs':[-1]}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
print "Best params: " + str(model.best_params_)
clf = model.fit(x_train, y_train)
print "Parameters used in model:"
#print clf.get_params(deep=False)
if args.select_best:
# Unable to use BaggingClassifier with SelectFromModel
if classifier != BAGGING:
print "Selecting best features"
sfm = SelectFromModel(clf, prefit=True)
x_train = sfm.transform(x_train)
x_test = sfm.transform(x_test)
clf = model.fit(x_train, y_train)
__print_and_log_results(clf, classifier, x_train, x_test, y_test,
out_file_name, args)
elif args.cross_validate:
# Cast to list to keep it all in memory
labels_file = open(args.labels)
labels = np.array([int(x.strip()) for x in labels_file.readlines()])
labels_file.close()
data_file = open(args.data_file, 'r')
data = list(csv.reader(data_file))
data_file.close()
examples = np.array(data[1:], dtype=float)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(examples, labels, test_size=0.1)
if args.sampling_technique:
print "Attempting to use sampling technique: " + args.sampling_technique
if np.isnan(args.sampling_ratio):
print "Unable to use sampling technique. Ratio is NaN."
else:
X_train, y_train = __get_sample_transformed_examples(args.sampling_technique,
X_train, y_train,
args.sampling_ratio)
if args.scale:
scaler = StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
for classifier in args.classifiers:
print "Using classifier " + classifier
model = __get_classifier_model(classifier, args)
print "Fitting model"
if args.grid_search:
print "Applying parameter tuning to model"
if classifier == LOG_REG:
parameters = {'loss':('log','hinge'), 'penalty':('l2', 'l1'), 'shuffle':[True], 'n_iter':[5], 'n_jobs':[-1], 'random_state':[179]}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
elif classifier == SVM:
parameters = {'kernel':('rbf', 'poly'), 'cache_size':[8096], 'random_state':[17]}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
elif classifier == ADA_BOOST:
parameters = {'n_estimators':[300], 'random_state':[13]}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
elif classifier == RF:
parameters = {'criterion':('gini', 'entropy'), 'n_jobs':[-1], 'n_estimators':[300], 'random_state':[17]}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
elif classifier == GRADIENT_BOOST:
parameters = {'n_estimators':[300], 'random_state':[17]}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
elif classifier == EXTRA_TREES:
parameters = {'n_estimators':[300], 'random_state':[17], 'n_jobs':[-1], 'criterion':('gini', 'entropy'), 'max_features':['log2', 40, 0.4], 'bootstrap':[True, False]}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
elif classifier == BAGGING:
#parameters = {'n_estimators' : [400], 'random_state' : [17],
# 'max_samples' : np.arange(0.5, 0.9, 0.1),
# 'max_features' : np.arange(0.5, 0.9, 0.1),
# 'bootstrap':[False], 'bootstrap_features':[False], 'n_jobs':[-1]}
parameters = {"base_estimator__criterion" : ["gini", "entropy"],
"base_estimator__splitter" : ["best", "random"],
"base_estimator__max_depth" : [10, 15, 20, 25],
"base_estimator__class_weight" : ['balanced'],
"base_estimator__max_features" : ['auto', 'log2']
}
model = grid_search.GridSearchCV(model, parameters, scoring='roc_auc', verbose=2)
clf = model.fit(X_train, y_train)
if args.grid_search:
print "Best params: " + str(model.best_params_)
if args.select_best:
if classifier != BAGGING:
print "Selecting best features"
sfm = SelectFromModel(clf, prefit = True)
X_train = sfm.transform(X_train)
X_test = sfm.transform(X_test)
clf = model.fit(X_train, y_train)
print "Evaluating results"
__print_and_log_results(clf, classifier, X_train, X_test, y_test,
out_file_name, args)
elif args.kfold:
# Cast to list to keep it all in memory
data_file = open(args.data_file, 'r')
data = list(csv.reader(data_file))
data_file.close()
labels_file = open(args.labels)
labels = np.array([int(x.strip()) for x in labels_file.readlines()])
labels_file.close()
X = np.array(data[1:], dtype=float)
kf = KFold(len(X), n_folds=10, shuffle=True, random_state=42)
for train, test in kf:
print "kfold loop iterate"
X_train, X_test, y_train, y_test = X[train], X[test], labels[train], labels[test]
if args.sampling_technique:
print "Attempting to use sampling technique: " + args.sampling_technique
if np.isnan(args.sampling_ratio):
print "Unable to use sampling technique. Ratio is NaN."
else:
X_train, y_train = __get_sample_transformed_examples(args.sampling_technique,
X_train, y_train,
args.sampling_ratio)
if args.scale:
scaler = StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
for classifier in args.classifiers:
print "Using classifier " + classifier
model = __get_classifier_model(classifier, args)
print "Fitting model"
clf = model.fit(X_train, y_train)
if args.select_best:
if classifier != BAGGING:
sfm = SelectFromModel(clf, prefit = True)
X_train = sfm.transform(X_train)
X_test = sfm.transform(X_test)
clf = model.fit(X_train, y_train)
print "Evaluating results"
__print_and_log_results(clf, classifier, X_train, X_test, y_test,
out_file_name, args)
print "kfold loop done"
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
# Data file should be used when the task is cross-validation or k-fold
# validation
argparser.add_argument("--data_file", help="Name of data file",
type=str, default="../Data/orange_small_train.data",
required=False)
# Labels is intended to be used for the cross-validation or k-fold validation
# task
argparser.add_argument("--labels", help="Name of labels file",
type=str, default="../Data/orange_small_train_churn.labels",
required=False)
# Train file and test file are intended for classification (the classify
# option)
argparser.add_argument("--train_file", help="Name of train file",
type=str, default="../Data/orange_small_train.data",
required=False)
argparser.add_argument("--test_file", help="Name of test file",
type=str, default="../Data/orange_small_test.data",
required=False)
# Test and train labels are needed for the classify task
argparser.add_argument("--test_labels", help="Name of test labels file",
type=str, default="../Data/orange_small_train_churn.labels",
required=False)
argparser.add_argument("--train_labels", help="Name of train labels file",
type=str, default="../Data/orange_small_train_churn.labels",
required=False)
# The classify task uses pre-split train/test files with train/test labels
argparser.add_argument("--classify", help="Classify using training and test set",
action="store_true")
argparser.add_argument("--classifiers", help="A list of classifiers to use",
nargs='+', required=False, default=['log_reg'])
argparser.add_argument("--kernel",
help="The kernel to be used for SVM classification",
type=str, default='rbf')
argparser.add_argument("--cross_validate",
help="Cross validate using training and test set",
action="store_true")
argparser.add_argument("--kfold", help="10-fold cross validation",
action="store_true")
argparser.add_argument("--write_to_log", help="Send output to log file",
action="store_true")
argparser.add_argument("--scale", help="Scale the data with StandardScale",
action="store_true")
argparser.add_argument("--sampling_technique",
help="The sampling technique to use", type=str, required=False)
argparser.add_argument("--sampling_ratio",
help="The sampling ratio to use", type=float,
default=float('NaN'), required=False)
argparser.add_argument("--grid_search", help="Use grid search",
action="store_true")
argparser.add_argument("--select_best", help="Select best features",
action="store_true")
argparser.add_argument("--predict_proba", help="Select best features",
action="store_true")
argparser.add_argument("--predict_threshold",
help="The prediction threshold to use", type=float,
default=0.55, required=False)
args = argparser.parse_args()
main(args)
|
mit
|
RayMick/scikit-learn
|
sklearn/utils/multiclass.py
|
45
|
12390
|
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else
(because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
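# --- Illustrative sketch (not part of the original scikit-learn module) ----
# A minimal, hypothetical estimator showing the calling convention described
# in the docstring above: the first ``partial_fit`` call must pass ``classes``
# and triggers state allocation, while later calls reuse the validated
# ``classes_`` attribute.
class _DemoPartialFitEstimator(object):
    def partial_fit(self, X, y, classes=None):
        if _check_partial_fit_first_call(self, classes):
            # First call: ``classes_`` has just been set on the estimator.
            self.counts_ = np.zeros(len(self.classes_), dtype=int)
        # Subsequent calls: ``classes`` (if given) was checked against
        # ``classes_``; accumulate simple per-class counts as a stand-in
        # for real incremental fitting.
        for label in y:
            self.counts_[np.searchsorted(self.classes_, label)] += 1
        return self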
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
        Number of classes in each column.
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
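# --- Illustrative sketch (not part of the original scikit-learn module) ----
# A small worked example of ``class_distribution`` on a dense two-output
# target; the priors follow directly from counting labels column by column.
def _demo_class_distribution():
    y = np.array([[0, 1],
                  [1, 1],
                  [1, 2]])
    classes, n_classes, class_prior = class_distribution(y)
    # classes == [array([0, 1]), array([1, 2])], n_classes == [2, 2]
    # class_prior[0] == [1/3, 2/3] and class_prior[1] == [2/3, 1/3]
    return classes, n_classes, class_prior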
|
bsd-3-clause
|
NationalSecurityAgency/timely
|
client/src/main/python/timely/TimelyAnalyticConfiguration.py
|
1
|
4996
|
import pandas
class TimelyAnalyticConfiguration():
def __init__(self, analyticConfig):
if isinstance(analyticConfig, dict):
self.groupByColumn = analyticConfig.get('groupByColumn', None)
self.includeColRegex = analyticConfig.get('includeColRegex', None)
self.excludeColRegex = analyticConfig.get('excludeColRegex', None)
self.counter = analyticConfig.get('counter', False)
self.sample_period = analyticConfig.get('sample', None)
self.how = analyticConfig.get('how', 'mean')
self.interpolate = analyticConfig.get('interpolate', True)
self.fill = analyticConfig.get('fill', None)
self.rolling_average_period = analyticConfig.get('rolling_average_period', None)
self.min_threshold = analyticConfig.get('min_threshold', None)
self.average_min_threshold = analyticConfig.get('average_min_threshold', None)
self.max_threshold = analyticConfig.get('max_threshold', None)
self.average_max_threshold = analyticConfig.get('average_max_threshold', None)
self.min_threshold_percentage = analyticConfig.get('min_threshold_percentage', None)
self.max_threshold_percentage = analyticConfig.get('max_threshold_percentage', None)
self.min_alert_period = analyticConfig.get('min_alert_period', None)
boolean = analyticConfig.get('boolean', 'or')
self.orCondition = boolean == 'or' or boolean == 'OR'
# alerts or all
self.display = analyticConfig.get('display', 'alerts')
self.send_alerts_to = analyticConfig.get('send_alerts_to', [])
self.output_dir = analyticConfig.get('output_dir', '/tmp')
self.last_alert = analyticConfig.get('last_alert', None)
self.system_name = analyticConfig.get('system_name', None)
self.sample = None
self.sample_minutes = None
if self.sample_period is not None:
td = pandas.to_timedelta(self.sample_period)
self.sample_minutes = int(td.total_seconds() / 60)
self.sample = str(self.sample_minutes) + 'min'
self.rolling_average_samples = None
self.rolling_average_minutes = None
if (self.rolling_average_period is not None) and (self.sample_minutes is not None):
td = pandas.to_timedelta(self.rolling_average_period)
self.rolling_average_minutes = int(td.total_seconds() / 60)
self.rolling_average_samples = int(self.rolling_average_minutes / self.sample_minutes)
self.min_alert_minutes = None
if self.min_alert_period is not None:
td = pandas.to_timedelta(self.min_alert_period)
self.min_alert_minutes = int(td.total_seconds() / 60)
self.last_alert_minutes = None
if self.last_alert is not None:
td = pandas.to_timedelta(self.last_alert)
self.last_alert_minutes = int(td.total_seconds() / 60)
elif isinstance(analyticConfig, TimelyAnalyticConfiguration):
self.groupByColumn = analyticConfig.groupByColumn
self.includeColRegex = analyticConfig.includeColRegex
self.excludeColRegex = analyticConfig.excludeColRegex
self.counter = analyticConfig.counter
self.sample_period = analyticConfig.sample_period
self.sample_minutes = analyticConfig.sample_minutes
self.sample = analyticConfig.sample
self.how = analyticConfig.how
self.interpolate = analyticConfig.interpolate
self.fill = analyticConfig.fill
self.rolling_average_period = analyticConfig.rolling_average_period
self.rolling_average_samples = analyticConfig.rolling_average_samples
self.rolling_average_minutes = analyticConfig.rolling_average_minutes
self.min_threshold = analyticConfig.min_threshold
self.average_min_threshold = analyticConfig.average_min_threshold
self.max_threshold = analyticConfig.max_threshold
self.average_max_threshold = analyticConfig.average_max_threshold
self.min_threshold_percentage = analyticConfig.min_threshold_percentage
self.max_threshold_percentage = analyticConfig.max_threshold_percentage
self.min_alert_period = analyticConfig.min_alert_period
self.min_alert_minutes = analyticConfig.min_alert_minutes
self.orCondition = analyticConfig.orCondition
# alerts or all
self.display = analyticConfig.display
self.send_alerts_to = analyticConfig.send_alerts_to
self.output_dir = analyticConfig.output_dir
self.last_alert = analyticConfig.last_alert
self.last_alert_minutes = analyticConfig.last_alert_minutes
self.system_name = analyticConfig.system_name
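# --- Illustrative sketch (not part of the original Timely client) ----------
# Minimal example of how the period strings above are resolved: pandas parses
# them into timedeltas, so a '5min' sample with a '30min' rolling average
# yields 6 samples per window. Only the keys shown are set; everything else
# falls back to the defaults read in __init__.
def _demo_analytic_configuration():
    config = TimelyAnalyticConfiguration({
        'sample': '5min',
        'rolling_average_period': '30min',
        'min_alert_period': '15min',
    })
    assert config.sample_minutes == 5
    assert config.sample == '5min'
    assert config.rolling_average_samples == 6
    assert config.min_alert_minutes == 15
    return config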
|
apache-2.0
|
Achuth17/scikit-learn
|
examples/svm/plot_svm_regression.py
|
249
|
1451
|
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
|
bsd-3-clause
|
Aufuray/ross-sea-project
|
app/reports/sic_report.py
|
1
|
9784
|
# ============================================================================
#
#
# Sea Ice Report
#
#
# ============================================================================
import sys
import numpy as np
from pandas import DataFrame
from scipy.stats import itemfreq
from matplotlib import pyplot as plt
from scipy.ndimage import filters, sobel
from tools import data
from app.models import LMImage as LM
from app.models import SICImage as SIC
from app.reports.analysis import day_image, hist_match
# ====================================================================
# Basic Statistical Analysis
# ====================================================================
def land_sic_overlap(lm_image, sic_image):
"""
Show Sea Ice Concentration and Land Mask together. This figure shows
the overlaps between mw_sic and lm.
"""
lm = lm_image
sic = sic_image
sic_surface = sic.surface(boolean=False)
lm_surface = lm.image()
condlist = [lm_surface == 1]
choicelist = [3]
merge = np.add(sic_surface, np.select(condlist, choicelist))
freqs = itemfreq(merge)
# Pie Chart config params
labels = "Sea Water", "Sea Ice", "Land", "Land - Sea Ice Overlap"
colors = ["blue", "lightblue", "yellow", "red"]
values = [freqs[0][1], freqs[1][1], freqs[2][1], freqs[3][1]]
    # Make and configure figure to be displayed
fig, axes = plt.subplots(1, 2)
fig.subplots_adjust(hspace=0.3, wspace=0.05)
#populate each axis of the figure
axes[0].imshow(merge)
axes[0].set_title("Sea Ice and Land Mask")
axes[1].pie(values, explode=[0.1, 0.1, 0.1, 0.4], labels=labels,
colors=colors, shadow=True, autopct='%1.2f%%')
plt.show()
def land_sic_overlap_timeseries(instrument,
title="Land-Sea Ice Border Variations"):
"""
Time Series that shows the percentage variations of the land mask
border given the expansion of sea ice in VIRS.
"""
files = data.file_names(instrument_id=data.INSTRUMENT_MAP.get(instrument))
out = []
for idx, mat in enumerate(data.mat_generator(files)):
sic = SIC(files[idx])
lm = LM(files[idx])
sic_surface = sic.surface(boolean=False)
lm_surface = lm.silhoutte()
silhoutte_freq = itemfreq(lm_surface)
border = silhoutte_freq[1][1]
merge = np.add(sic_surface, lm_surface)
merge_freq = itemfreq(merge)
intercept = merge_freq[2][1]
land_ice_overlap = (float(intercept) / border) * 100
temp = {'timestamp': lm.title, 'intercept': land_ice_overlap}
out.append(temp)
index = [elem['timestamp'] for elem in out]
df = DataFrame(out, index=index)
sdf = df.sort_values(by='timestamp')
sdf.plot(title=title)
plt.show()
def time_series(instrument='vir', title="SIC Percentage Changes"):
"""
    Show the change in sea ice concentration over time by plotting the
    percentage change in concentration for each image.
:params:
:param instrument: use the tools/data.py map to choose the right
instrument. defaults to vir.
"""
# VIRS or Modis files
files = data.file_names(instrument_id=data.INSTRUMENT_MAP[instrument])
out = []
for idx, mat in enumerate(data.mat_generator(files)):
sic = SIC(files[idx])
out.append(sic.percentage())
index = [elem['timestamp'] for elem in out]
df = DataFrame(out, index=index)
sdf = df.sort_values(by='timestamp')
sdf.plot(title=title)
plt.show()
def surface_analysis(sic_image, save=False, path=None):
"""
    Show the Sea Ice Concentration for one image in a figure that includes
    the original image and the sea ice surface in black and white.
:params:
:param sic_image: SICImage, the sea ice concentration image object
that contains an image's information.
:param save: boolean to save
"""
sic = sic_image
pos1, pos2, pos3 = (221, 222, 223)
seaice_surface = sic.surface()
figure = plt.figure()
figure.suptitle(
"Sea Ice concentration and Surface for {0}".format(sic.filename))
original = plt.subplot(pos1)
original.set_title("{0}".format(sic.title))
org = original.imshow(sic.image())
figure.colorbar(org, orientation="vertical")
sea_ice_surface = plt.subplot(pos2)
    sea_ice_surface.set_title("Sea Ice Surface - {0}".format(sic.title))
sea_ice_surface.imshow(seaice_surface)
silhoutte = plt.subplot(pos3)
silhoutte.set_title("Generic Laplace - Ice silhoutte")
silhoutte.imshow(
filters.generic_laplace(seaice_surface, sobel), cmap='Greys_r')
plt.show()
def silhoutte(img):
"""
    Shows the silhouette of the area where sea ice is located. The final
    result is shown in black and white.
"""
if isinstance(img, SIC):
seaice_surface = img.surface()
im = filters.generic_laplace(seaice_surface, sobel)
#TODO: The output can be more clear, we need to find a filter that
# better connects the edges of the output.
plt.imshow(im, cmap='Greys_r')
plt.title('Sea Ice Concentration (mw_sic) silhoutte')
elif isinstance(img, LM):
plt.imshow(img.silhoutte(), cmap='Greys', interpolation='nearest')
plt.title('Land Mask (lm) silhoutte')
else:
print "The image passed is not SICImage or LMImage"
sys.exit(1)
plt.show()
def distribution(img):
"""
Shows a pie chart with the sea ice or land mask percentage on a given
image/time of the day.
"""
percentages = img.percentage()
if isinstance(img, SIC):
labels = 'Ice', 'other'
colors = ['lightskyblue', 'yellowgreen']
values = [percentages['ice'], percentages['other']]
plt.pie(values, explode=[0.1, 0], labels=labels, colors=colors,
shadow=True, autopct='%1.2f%%')
plt.title('SIC (mw_sic) Distribution - {0}'.format(img.title))
elif isinstance(img, LM):
labels = 'Land', 'Other'
colors = ['yellowgreen', 'lightskyblue']
values = [percentages['lm'], percentages['other']]
plt.pie(values, explode=[0.1, 0], labels=labels, colors=colors,
shadow=True, autopct='%1.2f%%')
plt.title('Land Mask (lm) Distribution - {0}'.format(img.title))
else:
print "The image passed is not SICImage or LMImage"
sys.exit(1)
plt.axis('equal')
plt.show()
# ====================================================================
# Histogram Matching Analysis
# ====================================================================
def unified_day_image(lense, interval=20):
"""
:params:
:param lense: string with the key/lense to be used. Options are
mw_sic, lm
:param interval: integer, that indicates the maximum time interval
between pictures of different instruments. This
interval is in minutes.
"""
virs_files = data.file_names(data.INSTRUMENT_MAP.get('vir'))
modis_files = data.file_names(data.INSTRUMENT_MAP.get('mod'))
processed = list()
titles = list()
for idx, vir in enumerate(virs_files):
        if idx >= len(modis_files):
            break
virs_date = data.parse_date(vir)
modis_date = data.parse_date(modis_files[idx])
if data.date_dff(virs_date, modis_date) <= interval:
source = SIC(virs_files[idx])
template = SIC(modis_files[idx])
out = hist_match(source.image(), template.image())
processed.append(out)
titles.append("{0} and {1}".format(source.title, template.title))
    # Make and configure figure to be displayed
if len(processed) == 0:
print "No pictures were processed, consider changing the interval"
sys.exit(0)
elif len(processed) == 1:
plt.imshow(processed[0])
else:
boxes = len(processed)
if boxes % 2 > 0:
boxes = boxes + 1
levels = boxes / 2
fig, axes = plt.subplots(levels, 2)
fig.subplots_adjust(hspace=0.5, wspace=0.2)
fig.suptitle(
"VIRS-MODIS Hist. Matched {0} mins apart with {1} images".format(
interval, len(processed)),
fontsize=20)
if len(processed) <= 2:
for idx, img in enumerate(processed):
axes[idx].imshow(processed[idx])
axes[idx].set_title(titles[idx])
else:
idx = 0
for level in range(levels):
for box in range(2):
if idx < len(processed):
axes[level][box].imshow(processed[idx])
                        axes[level][box].set_title(titles[idx])
idx += 1
else:
break
plt.show()
def show_day_images_by_instrument():
"""
    Show day images after histogram matching for the VIRS and MODIS instruments.
"""
virs = day_image(instrument='vir', lense="mw_sic")
modis = day_image(instrument='mod', lense="mw_sic")
    # Make and configure figure to be displayed
fig, axes = plt.subplots(1, 2)
fig.subplots_adjust(hspace=0.3, wspace=0.05)
#populate each axis of the figure
axes[0].imshow(virs)
axes[0].set_title("VIRS")
axes[1].imshow(modis)
axes[1].set_title("MODIS")
plt.show()
|
mit
|
Fireblend/scikit-learn
|
examples/svm/plot_svm_kernels.py
|
329
|
1971
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
|
bsd-3-clause
|
synthicity/orca
|
orca/utils/testing.py
|
2
|
2026
|
# Orca
# Copyright (C) 2016 UrbanSim Inc.
# See full license in LICENSE.
"""
Utilities used in testing of Orca.
"""
import numpy as np
import numpy.testing as npt
import pandas as pd
def assert_frames_equal(actual, expected, use_close=False):
"""
Compare DataFrame items by index and column and
raise AssertionError if any item is not equal.
Ordering is unimportant, items are compared only by label.
NaN and infinite values are supported.
Parameters
----------
actual : pandas.DataFrame
expected : pandas.DataFrame
use_close : bool, optional
If True, use numpy.testing.assert_allclose instead of
numpy.testing.assert_equal.
"""
if use_close:
comp = npt.assert_allclose
else:
comp = npt.assert_equal
assert (isinstance(actual, pd.DataFrame) and
isinstance(expected, pd.DataFrame)), \
'Inputs must both be pandas DataFrames.'
for i, exp_row in expected.iterrows():
assert i in actual.index, 'Expected row {!r} not found.'.format(i)
act_row = actual.loc[i]
for j, exp_item in exp_row.iteritems():
assert j in act_row.index, \
'Expected column {!r} not found.'.format(j)
act_item = act_row[j]
try:
comp(act_item, exp_item)
except AssertionError as e:
raise AssertionError(
str(e) + '\n\nColumn: {!r}\nRow: {!r}'.format(j, i))
def assert_index_equal(left, right):
"""
Similar to pdt.assert_index_equal but is not sensitive to key ordering.
Parameters
----------
left: pandas.Index
right: pandas.Index
"""
assert isinstance(left, pd.Index)
assert isinstance(right, pd.Index)
left_diff = left.difference(right)
right_diff = right.difference(left)
if len(left_diff) > 0 or len(right_diff) > 0:
raise AssertionError("keys not in left [{0}], keys not in right [{1}]".format(
left_diff, right_diff))
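# --- Illustrative sketch (not part of the original Orca test utilities) ----
# Example of the tolerance switch described above: with ``use_close=True``
# the element comparison goes through numpy.testing.assert_allclose, so a
# tiny floating point difference passes and row order remains irrelevant.
def _demo_assert_frames_equal():
    expected = pd.DataFrame({'a': [1.0, 2.0]}, index=['x', 'y'])
    actual = pd.DataFrame({'a': [2.0 + 1e-12, 1.0]}, index=['y', 'x'])
    assert_frames_equal(actual, expected, use_close=True)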
|
bsd-3-clause
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/sklearn/datasets/tests/test_mldata.py
|
3
|
5446
|
"""Test functionality of mldata fetching utilities."""
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in, assert_not_in, mock_urllib2
from nose.tools import assert_equal, assert_raises
from nose import with_setup
from numpy.testing import assert_array_equal
import os
import shutil
import tempfile
import scipy as sp
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urllib2_ref = datasets.mldata.urllib2
datasets.mldata.urllib2 = mock_urllib2({'mock':
{'label': sp.ones((150,)),
'data': sp.ones((150, 4))}})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.urllib2.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urllib2 = _urllib2_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urllib2_ref = datasets.mldata.urllib2
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urllib2 = mock_urllib2({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urllib2 = _urllib2_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urllib2_ref = datasets.mldata.urllib2
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urllib2 = mock_urllib2({dataname:
({'label': y,
'data': x,
'z': z},
['z', 'data', 'label'])})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urllib2 = mock_urllib2({dataname:
({'y': y,
'x': x,
'z': z},
['y', 'x', 'z'])})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urllib2 = mock_urllib2({dataname:
({'y': y,
'x': x,
'z': z},
['z', 'x', 'y'])})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urllib2 = _urllib2_ref
|
agpl-3.0
|
khalibartan/pgmpy
|
pgmpy/estimators/BayesianEstimator.py
|
1
|
8074
|
# -*- coding: utf-8 -*-
import numpy as np
from pgmpy.estimators import ParameterEstimator
from pgmpy.factors.discrete import TabularCPD
from pgmpy.models import BayesianModel
class BayesianEstimator(ParameterEstimator):
def __init__(self, model, data, **kwargs):
"""
Class used to compute parameters for a model using Bayesian Parameter Estimation.
See `MaximumLikelihoodEstimator` for constructor parameters.
"""
if not isinstance(model, BayesianModel):
raise NotImplementedError("Bayesian Parameter Estimation is only implemented for BayesianModel")
super(BayesianEstimator, self).__init__(model, data, **kwargs)
def get_parameters(self, prior_type='BDeu', equivalent_sample_size=5, pseudo_counts=None):
"""
Method to estimate the model parameters (CPDs).
Parameters
----------
prior_type: 'dirichlet', 'BDeu', or 'K2'
            string indicating which type of prior to use for the model parameters.
- If 'prior_type' is 'dirichlet', the following must be provided:
'pseudo_counts' = dirichlet hyperparameters; a dict containing, for each variable, a 2-D
array of the shape (node_card, product of parents_card) with a "virtual" count for each
variable state in the CPD, that is added to the state counts.
(lexicographic ordering of states assumed)
- If 'prior_type' is 'BDeu', then an 'equivalent_sample_size'
must be specified instead of 'pseudo_counts'. This is equivalent to
'prior_type=dirichlet' and using uniform 'pseudo_counts' of
`equivalent_sample_size/(node_cardinality*np.prod(parents_cardinalities))` for each node.
                'equivalent_sample_size' can either be a numerical value or a dict that specifies
                the size for each variable separately.
- A prior_type of 'K2' is a shorthand for 'dirichlet' + setting every pseudo_count to 1,
regardless of the cardinality of the variable.
Returns
-------
parameters: list
List of TabularCPDs, one for each variable of the model
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.estimators import BayesianEstimator
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 4)),
... columns=['A', 'B', 'C', 'D'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D')])
>>> estimator = BayesianEstimator(model, values)
>>> estimator.get_parameters(prior_type='BDeu', equivalent_sample_size=5)
[<TabularCPD representing P(C:2) at 0x7f7b534251d0>,
<TabularCPD representing P(B:2 | C:2, A:2) at 0x7f7b4dfd4da0>,
<TabularCPD representing P(A:2) at 0x7f7b4dfd4fd0>,
<TabularCPD representing P(D:2 | C:2) at 0x7f7b4df822b0>]
"""
parameters = []
for node in self.model.nodes():
_equivalent_sample_size = equivalent_sample_size[node] if isinstance(equivalent_sample_size, dict) else \
equivalent_sample_size
_pseudo_counts = pseudo_counts[node] if pseudo_counts else None
cpd = self.estimate_cpd(node,
prior_type=prior_type,
equivalent_sample_size=_equivalent_sample_size,
pseudo_counts=_pseudo_counts)
parameters.append(cpd)
return parameters
def estimate_cpd(self, node, prior_type='BDeu', pseudo_counts=[], equivalent_sample_size=5):
"""
Method to estimate the CPD for a given variable.
Parameters
----------
node: int, string (any hashable python object)
The name of the variable for which the CPD is to be estimated.
prior_type: 'dirichlet', 'BDeu', 'K2',
            string indicating which type of prior to use for the model parameters.
- If 'prior_type' is 'dirichlet', the following must be provided:
'pseudo_counts' = dirichlet hyperparameters; 2-D array of shape
(node_card, product of parents_card) with a "virtual" count for
each variable state in the CPD.
The virtual counts are added to the actual state counts found in the data.
(if a list is provided, a lexicographic ordering of states is assumed)
- If 'prior_type' is 'BDeu', then an 'equivalent_sample_size'
must be specified instead of 'pseudo_counts'. This is equivalent to
'prior_type=dirichlet' and using uniform 'pseudo_counts' of
`equivalent_sample_size/(node_cardinality*np.prod(parents_cardinalities))`.
- A prior_type of 'K2' is a shorthand for 'dirichlet' + setting every pseudo_count to 1,
regardless of the cardinality of the variable.
Returns
-------
CPD: TabularCPD
Examples
--------
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.estimators import BayesianEstimator
>>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
>>> model = BayesianModel([('A', 'C'), ('B', 'C')])
>>> estimator = BayesianEstimator(model, data)
>>> cpd_C = estimator.estimate_cpd('C', prior_type="dirichlet", pseudo_counts=[1, 2])
>>> print(cpd_C)
╒══════╤══════╤══════╤══════╤════════════════════╕
│ A │ A(0) │ A(0) │ A(1) │ A(1) │
├──────┼──────┼──────┼──────┼────────────────────┤
│ B │ B(0) │ B(1) │ B(0) │ B(1) │
├──────┼──────┼──────┼──────┼────────────────────┤
│ C(0) │ 0.25 │ 0.25 │ 0.5 │ 0.3333333333333333 │
├──────┼──────┼──────┼──────┼────────────────────┤
│ C(1) │ 0.75 │ 0.75 │ 0.5 │ 0.6666666666666666 │
╘══════╧══════╧══════╧══════╧════════════════════╛
"""
node_cardinality = len(self.state_names[node])
parents = sorted(self.model.get_parents(node))
parents_cardinalities = [len(self.state_names[parent]) for parent in parents]
cpd_shape = (node_cardinality, np.prod(parents_cardinalities, dtype=int))
if prior_type == 'K2':
pseudo_counts = np.ones(cpd_shape, dtype=int)
elif prior_type == 'BDeu':
alpha = float(equivalent_sample_size) / (node_cardinality * np.prod(parents_cardinalities))
pseudo_counts = np.ones(cpd_shape, dtype=float) * alpha
elif prior_type == 'dirichlet':
pseudo_counts = np.array(pseudo_counts)
if pseudo_counts.shape != cpd_shape:
raise ValueError("The shape of pseudo_counts must be: {shape}".format(
shape=str(cpd_shape)))
else:
raise ValueError("'prior_type' not specified")
state_counts = self.state_counts(node)
bayesian_counts = state_counts + pseudo_counts
cpd = TabularCPD(node, node_cardinality, np.array(bayesian_counts),
evidence=parents,
evidence_card=parents_cardinalities,
state_names=self.state_names)
cpd.normalize()
return cpd
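# --- Illustrative sketch (not part of the original pgmpy module) -----------
# Small numeric example of the BDeu pseudo-count described above: with an
# equivalent sample size of 8, a binary node and two binary parents, every
# cell of the (2, 4) state-count table receives 8 / (2 * 2 * 2) = 1.0
# virtual observations before the CPD is normalised.
def _demo_bdeu_pseudo_counts(equivalent_sample_size=8, node_cardinality=2,
                             parents_cardinalities=(2, 2)):
    alpha = float(equivalent_sample_size) / (node_cardinality *
                                             np.prod(parents_cardinalities))
    cpd_shape = (node_cardinality, int(np.prod(parents_cardinalities)))
    return np.ones(cpd_shape, dtype=float) * alpha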
|
mit
|
imaculate/scikit-learn
|
examples/datasets/plot_random_dataset.py
|
348
|
2254
|
"""
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
|
bsd-3-clause
|
tswast/google-cloud-python
|
bigquery_storage/setup.py
|
2
|
2571
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import setuptools
name = "google-cloud-bigquery-storage"
description = "BigQuery Storage API API client library"
version = "0.7.0"
release_status = "Development Status :: 4 - Beta"
dependencies = [
"google-api-core[grpc] >= 1.14.0, < 2.0.0dev",
'enum34; python_version < "3.4"',
]
extras = {
"pandas": "pandas>=0.17.1",
"fastavro": "fastavro>=0.21.2",
"pyarrow": "pyarrow>=0.13.0, != 0.14.0",
}
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
packages = [
package for package in setuptools.find_packages() if package.startswith("google")
]
namespaces = ["google"]
if "google.cloud" in packages:
namespaces.append("google.cloud")
setuptools.setup(
name=name,
version=version,
description=description,
long_description=readme,
author="Google LLC",
author_email="[email protected]",
license="Apache 2.0",
url="https://github.com/GoogleCloudPlatform/google-cloud-python",
classifiers=[
release_status,
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
"Topic :: Internet",
],
platforms="Posix; MacOS X; Windows",
packages=packages,
namespace_packages=namespaces,
install_requires=dependencies,
extras_require=extras,
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
include_package_data=True,
zip_safe=False,
)
|
apache-2.0
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/pandas/tseries/common.py
|
9
|
9576
|
## datetimelike delegation ##
import numpy as np
from pandas.core.base import PandasDelegate, NoNewAttributesMixin
from pandas.core import common as com
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas import tslib
from pandas.core.common import (_NS_DTYPE, _TD_DTYPE, is_period_arraylike,
is_datetime_arraylike, is_integer_dtype, is_list_like,
is_datetime64_dtype, is_datetime64tz_dtype,
is_timedelta64_dtype, is_categorical_dtype,
get_dtype_kinds, take_1d)
def is_datetimelike(data):
""" return a boolean if we can be successfully converted to a datetimelike """
try:
maybe_to_datetimelike(data)
return True
except (Exception):
pass
return False
def maybe_to_datetimelike(data, copy=False):
"""
return a DelegatedClass of a Series that is datetimelike
    (e.g. datetime64[ns], timedelta64[ns] dtype or a Series of Periods)
raise TypeError if this is not possible.
Parameters
----------
data : Series
copy : boolean, default False
copy the input data
Returns
-------
DelegatedClass
"""
from pandas import Series
if not isinstance(data, Series):
raise TypeError("cannot convert an object of type {0} to a datetimelike index".format(type(data)))
index = data.index
name = data.name
orig = data if is_categorical_dtype(data) else None
if orig is not None:
data = orig.values.categories
if is_datetime64_dtype(data.dtype):
return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer'), index, name=name,
orig=orig)
elif is_datetime64tz_dtype(data.dtype):
return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer', ambiguous='infer'),
index, data.name, orig=orig)
elif is_timedelta64_dtype(data.dtype):
return TimedeltaProperties(TimedeltaIndex(data, copy=copy, freq='infer'), index,
name=name, orig=orig)
else:
if is_period_arraylike(data):
return PeriodProperties(PeriodIndex(data, copy=copy), index, name=name, orig=orig)
if is_datetime_arraylike(data):
return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer'), index,
name=name, orig=orig)
raise TypeError("cannot convert an object of type {0} to a datetimelike index".format(type(data)))
class Properties(PandasDelegate, NoNewAttributesMixin):
def __init__(self, values, index, name, orig=None):
self.values = values
self.index = index
self.name = name
self.orig = orig
self._freeze()
def _delegate_property_get(self, name):
from pandas import Series
result = getattr(self.values,name)
# maybe need to upcast (ints)
if isinstance(result, np.ndarray):
if is_integer_dtype(result):
result = result.astype('int64')
elif not is_list_like(result):
return result
# blow up if we operate on categories
if self.orig is not None:
result = take_1d(result, self.orig.cat.codes)
# return the result as a Series, which is by definition a copy
result = Series(result, index=self.index, name=self.name)
# setting this object will show a SettingWithCopyWarning/Error
result.is_copy = ("modifications to a property of a datetimelike object are not "
"supported and are discarded. Change values on the original.")
return result
def _delegate_property_set(self, name, value, *args, **kwargs):
raise ValueError("modifications to a property of a datetimelike object are not "
"supported. Change values on the original.")
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self.values, name)
result = method(*args, **kwargs)
if not com.is_list_like(result):
return result
result = Series(result, index=self.index, name=self.name)
# setting this object will show a SettingWithCopyWarning/Error
result.is_copy = ("modifications to a method of a datetimelike object are not "
"supported and are discarded. Change values on the original.")
return result
class DatetimeProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.hour
>>> s.dt.second
>>> s.dt.quarter
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
"""
def to_pydatetime(self):
return self.values.to_pydatetime()
DatetimeProperties._add_delegate_accessors(delegate=DatetimeIndex,
accessors=DatetimeIndex._datetimelike_ops,
typ='property')
DatetimeProperties._add_delegate_accessors(delegate=DatetimeIndex,
accessors=["to_period","tz_localize","tz_convert","normalize","strftime"],
typ='method')
class TimedeltaProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.hours
>>> s.dt.seconds
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
"""
def to_pytimedelta(self):
return self.values.to_pytimedelta()
@property
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
return self.values.components.set_index(self.index)
TimedeltaProperties._add_delegate_accessors(delegate=TimedeltaIndex,
accessors=TimedeltaIndex._datetimelike_ops,
typ='property')
TimedeltaProperties._add_delegate_accessors(delegate=TimedeltaIndex,
accessors=["to_pytimedelta", "total_seconds"],
typ='method')
class PeriodProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.hour
>>> s.dt.second
>>> s.dt.quarter
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
"""
PeriodProperties._add_delegate_accessors(delegate=PeriodIndex,
accessors=PeriodIndex._datetimelike_ops,
typ='property')
PeriodProperties._add_delegate_accessors(delegate=PeriodIndex,
accessors=["strftime"],
typ='method')
class CombinedDatetimelikeProperties(DatetimeProperties, TimedeltaProperties):
# This class is never instantiated, and exists solely for the benefit of
# the Series.dt class property. For Series objects, .dt will always be one
# of the more specific classes above.
__doc__ = DatetimeProperties.__doc__
def _concat_compat(to_concat, axis=0):
"""
    provide concatenation of a datetimelike array of arrays each of which is a single
    M8[ns], datetime64[ns, tz] or m8[ns] dtype
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
Returns
-------
a single array, preserving the combined dtypes
"""
def convert_to_pydatetime(x, axis):
# coerce to an object dtype
if x.dtype == _NS_DTYPE:
if hasattr(x, 'tz'):
x = x.asobject
shape = x.shape
x = tslib.ints_to_pydatetime(x.view(np.int64).ravel())
x = x.reshape(shape)
elif x.dtype == _TD_DTYPE:
shape = x.shape
x = tslib.ints_to_pytimedelta(x.view(np.int64).ravel())
x = x.reshape(shape)
return x
typs = get_dtype_kinds(to_concat)
# datetimetz
if 'datetimetz' in typs:
# we require ALL of the same tz for datetimetz
tzs = set([ getattr(x,'tz',None) for x in to_concat ])-set([None])
if len(tzs) == 1:
return DatetimeIndex(np.concatenate([ x.tz_localize(None).asi8 for x in to_concat ]), tz=list(tzs)[0])
# single dtype
if len(typs) == 1:
if not len(typs-set(['datetime'])):
new_values = np.concatenate([x.view(np.int64) for x in to_concat],
axis=axis)
return new_values.view(_NS_DTYPE)
elif not len(typs-set(['timedelta'])):
new_values = np.concatenate([x.view(np.int64) for x in to_concat],
axis=axis)
return new_values.view(_TD_DTYPE)
# need to coerce to object
to_concat = [convert_to_pydatetime(x, axis) for x in to_concat]
return np.concatenate(to_concat,axis=axis)
|
gpl-2.0
|
Eniac-Xie/faster-rcnn-resnet
|
lib/roi_data_layer/minibatch.py
|
2
|
13966
|
# --------------------------------------------------------
# Fast R-CNN with OHEM
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Abhinav Shrivastava
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
from fast_rcnn.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
from fast_rcnn.nms_wrapper import nms
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0) or cfg.TRAIN.USE_OHEM, \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = np.inf if cfg.TRAIN.USE_OHEM else cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
if cfg.TRAIN.HAS_RPN:
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
else: # not using RPN
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in xrange(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_inside_weights \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
blobs['rois'] = rois_blob
blobs['labels'] = labels_blob
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_inside_weights'] = bbox_inside_blob
blobs['bbox_outside_weights'] = \
np.array(bbox_inside_blob > 0).astype(np.float32)
return blobs
def get_allrois_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
if cfg.TRAIN.HAS_RPN:
        # RPN path: single-image batches only (mirrors get_minibatch)
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
else: # not using RPN
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
for im_i in xrange(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_inside_weights \
= _all_rois(roidb[im_i], num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
blobs['rois'] = rois_blob
blobs['labels'] = labels_blob
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_inside_weights'] = bbox_inside_blob
blobs['bbox_outside_weights'] = \
np.array(bbox_inside_blob > 0).astype(np.float32)
return blobs
def get_ohem_minibatch(loss, rois, labels, bbox_targets=None,
bbox_inside_weights=None, bbox_outside_weights=None):
"""Given rois and their loss, construct a minibatch using OHEM."""
loss = np.array(loss)
if cfg.TRAIN.OHEM_USE_NMS:
# Do NMS using loss for de-dup and diversity
keep_inds = []
nms_thresh = cfg.TRAIN.OHEM_NMS_THRESH
source_img_ids = [roi[0] for roi in rois]
for img_id in np.unique(source_img_ids):
for label in np.unique(labels):
sel_indx = np.where(np.logical_and(labels == label, \
source_img_ids == img_id))[0]
if not len(sel_indx):
continue
boxes = np.concatenate((rois[sel_indx, 1:],
loss[sel_indx][:,np.newaxis]), axis=1).astype(np.float32)
keep_inds.extend(sel_indx[nms(boxes, nms_thresh)])
hard_keep_inds = select_hard_examples(loss[keep_inds])
hard_inds = np.array(keep_inds)[hard_keep_inds]
else:
hard_inds = select_hard_examples(loss)
blobs = {'rois_hard': rois[hard_inds, :].copy(),
'labels_hard': labels[hard_inds].copy()}
if bbox_targets is not None:
assert cfg.TRAIN.BBOX_REG
blobs['bbox_targets_hard'] = bbox_targets[hard_inds, :].copy()
blobs['bbox_inside_weights_hard'] = bbox_inside_weights[hard_inds, :].copy()
blobs['bbox_outside_weights_hard'] = bbox_outside_weights[hard_inds, :].copy()
return blobs
def select_hard_examples(loss):
"""Select hard rois."""
# Sort and select top hard examples.
sorted_indices = np.argsort(loss)[::-1]
hard_keep_inds = sorted_indices[0:np.minimum(len(loss), cfg.TRAIN.BATCH_SIZE)]
# (explore more ways of selecting examples in this function; e.g., sampling)
return hard_keep_inds
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(
fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
# Sample foreground regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(
bg_inds, size=bg_rois_per_this_image, replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(
roidb['bbox_targets'][keep_inds, :], num_classes)
return labels, overlaps, rois, bbox_targets, bbox_inside_weights
def _all_rois(roidb, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# To use custom cfg.TRAIN.BG_THRESH_LO, comment the following assert.
assert cfg.TRAIN.BG_THRESH_LO == 0.0, \
"OHEM works best with BG_THRESH_LO = 0.0 (current value is {}).".format(cfg.TRAIN.BG_THRESH_LO)
# Select foreground (background) RoIs.
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
bg_inds = np.where(overlaps < cfg.TRAIN.BG_THRESH_HI)[0]
# All RoIs.
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[len(fg_inds):] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(
roidb['bbox_targets'][keep_inds, :], num_classes)
return labels, overlaps, rois, bbox_targets, bbox_inside_weights
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
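# --- Illustrative sketch (not part of the original Fast R-CNN code) --------
# Worked example of the 4-of-4*K expansion described above, assuming the
# Python 2 / older NumPy environment this module targets (the class column is
# stored as float in the roidb): with num_classes=3 a RoI labelled class 2
# writes its four targets into columns 8..11, and background RoIs stay zero.
def _demo_bbox_regression_labels():
    bbox_target_data = np.array([[2, 0.1, 0.2, 0.3, 0.4],   # class 2
                                 [0, 0.0, 0.0, 0.0, 0.0]],  # background
                                dtype=np.float32)
    targets, inside_weights = _get_bbox_regression_labels(bbox_target_data,
                                                          num_classes=3)
    # targets[0, 8:12] == [0.1, 0.2, 0.3, 0.4]; every other entry is zero.
    return targets, inside_weights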
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' overlap: ', overlaps[i]
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
|
mit
|
pmeier82/SpikePlot
|
spikeplot/plot_waveforms.py
|
1
|
5398
|
# -*- coding: utf-8 -*-
#
# spikeplot - plot_waveforms.py
#
# Philipp Meier <pmeier82 at googlemail dot com>
# 2011-09-29
#
"""scatter plot for clustering data"""
__docformat__ = 'restructuredtext'
__all__ = ['waveforms']
##---IMPORTS
import scipy as sp
from .common import COLOURS, save_figure, check_plotting_handle, plt
##---FUNCTION
def waveforms(waveforms, samples_per_second=None, tf=None, plot_mean=False,
plot_single_waveforms=True, set_y_range=False,
plot_separate=True, templates=None, plot_handle=None,
colours=None, title=None, filename=None, show=True):
"""plot one set of spiketrains or two sets of spkitrains with their
interspike alignment
:Parameters:
waveforms : dict
Dict of ndarray, holding the waveforms for different units.
plot_handle : figure or axis
A reference to a figure or axis, or None if one has to be created.
samples_per_second : int
Scale factor for the axis.
tf : int
The template length of the waveforms
plot_mean : bool
If True, plot the mean-waveform per unit.
plot_single_waveforms : bool
If True, plot the single waveforms per unit.
plot_separate : bool
If True, plot each units waveforms in a separate axis.
templates : dict
            dict holding one concatenated waveform per key
set_y_range : bool
Adjust the y-axis range so waveforms fit in nicely.
colours : list
A list of matplotlib conform color values (rgb 3-tuples). If None
the common.plot.COLORS set is used.
title : str
Title for the plot. No title if None or ''.
filename : str
If given and a valid path on the local system, save the figure.
show : bool
If True, show the figure.
:Returns:
        matplotlib.figure
            Reference to the figure plotted on
"""
# setup figure if necessary
fig, ax = check_plotting_handle(plot_handle, create_ax=not plot_separate)
if plot_separate is True:
fig.clear()
ax = None
# checks and inits
if type(waveforms) is not dict:
waveforms = {'0':waveforms}
if colours is None:
col_lst = COLOURS
else:
col_lst = colours
srate = 1.0
if samples_per_second is not None:
srate = samples_per_second
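    # reshape (n_events, n_samples, n_channels) waveform tensors into
    # (n_events, n_samples * n_channels) arrays by concatenating the channels
    # along the time axis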
for k in waveforms.keys():
if waveforms[k].ndim == 3:
waveforms[k] = sp.vstack(
[sp.hstack(
[waveforms[k][i, :, j]
for j in xrange(waveforms[k].shape[-1])])
for i in xrange(waveforms[k].shape[0])])
firstKey = waveforms.keys()[0]
nunits = len(waveforms)
my_ymin = waveforms[firstKey].min()
my_ymax = waveforms[firstKey].max()
my_xmax = waveforms[firstKey].shape[1] - 1
nc = 1
if tf is not None:
nc = int(waveforms[firstKey].shape[1] / tf)
# plot single wave forms
if plot_single_waveforms is True:
col_idx = 0
for u, k in enumerate(sorted(waveforms.keys())):
if plot_separate is True:
ax = fig.add_subplot(nunits, 1, u + 1, sharex=ax, sharey=ax)
nevent, nsample = waveforms[k].shape
my_ymin = min(my_ymin, waveforms[k].min())
my_ymax = max(my_ymax, waveforms[k].max())
my_xmax = max(my_xmax, waveforms[k].shape[1] - 1)
col = col_lst[col_idx % len(col_lst)]
if plot_mean is True:
col = 'gray'
for i in xrange(waveforms[k].shape[0]):
ax.plot(sp.arange(nsample) / srate, waveforms[k][i, :],
color=col)
col_idx += 1
# addition: per axis event count
ax.set_ylabel('n:%s' % nevent)
# plot cluster means
if plot_mean is True:
col_idx = 0
for u, k in enumerate(sorted(waveforms.keys())):
if plot_separate is True:
ax = fig.axes[u]
if templates is not None and k in templates:
my_mean = templates[k]
else:
my_mean = waveforms[k].mean(axis=0)
nevent, nsample = waveforms[k].shape
my_ymin = min(my_ymin, my_mean.min())
my_ymax = max(my_ymax, my_mean.max())
my_xmax = max(my_xmax, my_mean.size - 1)
ax.plot(sp.arange(nsample) / srate, my_mean,
c=col_lst[col_idx % len(col_lst)], lw=2)
col_idx += 1
# if multichannel waveforms, plot vertical lines at channel borders
if tf is not None:
for i in xrange(1, nc):
for a in fig.axes:
a.axvline((tf * i) / srate, ls='dashed', color='y')
# fancy stuff
if title is not None:
fig.suptitle(title)
if samples_per_second is not None:
ax.set_xlabel('time [s]')
else:
ax.set_xlabel('time [samples]')
ax.set_xlim(0, my_xmax)
if set_y_range is True:
ax.set_ylim((1.01 * my_ymin, 1.01 * my_ymax))
# produce plots
if filename is not None:
save_figure(fig, filename, '')
if show is True:
plt.show()
return fig
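##---EXAMPLE
# Minimal usage sketch (an illustrative addition, not part of the original
# module): builds two hypothetical units from synthetic sine/cosine traces and
# plots them with per-unit means. The unit keys, template length and sampling
# rate below are made up for demonstration purposes only.
def _example_waveforms():
    t = sp.arange(60)
    wf = {'unit_a': sp.vstack([sp.sin(t / 3.0) + 0.05 * i for i in xrange(10)]),
          'unit_b': sp.vstack([sp.cos(t / 3.0) + 0.05 * i for i in xrange(8)])}
    return waveforms(wf, samples_per_second=32000, tf=30, plot_mean=True,
                     plot_single_waveforms=True, plot_separate=True,
                     title='demo units', show=False)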
##--- MAIN
if __name__ == '__main__':
pass
|
mit
|
tomevans/planetc
|
planetc-3.4/transit.py
|
1
|
22006
|
import numpy as np
import matplotlib.pyplot as plt
import pdb, sys, os
import keporb, ma02
import phys_consts as consts
# 15Jul2013 TME:
# Actually, I think I worked out that the 'bug' mentioned
# in the previous comment was an artefact of the equations,
# rather than a bug in the code. It might be to do with the
# fact that the equations implemented in the code are
# themselves an approximation that is very good for moderate
# eccentricities, but starts breaking down for higher
# eccentricity orbits.
# 3Mar2013 TME:
# There appears to be a bug in the code for calculating
# eccentric transits that manifests itself as an egress
# (haven't seen an ingress case yet) that isn't perfectly
# smooth - it looks basically right but it's definitely
# a bit funny. Need to fix this bug.
#
# Nov2012 TME:
# This module provides python wrappers for underlying
# C routines that compute Mandel & Agol (2002) transit
# lightcurves.
def ma02_aRs( t, **pars ):
"""
Uses the Mandel & Agol (2002) analytic equations to compute
transit lightcurves. This routine takes the semimajor axis
directly as an input parameter, and therefore does not require
either the star or planet mass; this differs from the
    ma02_RsMsRpMp() routine which uses the star and planet masses
with Kepler's third law to calculate the semimajor axis.
CALLING
F = transit.ma02_aRs( t, **pars )
INPUTS
** t - a numpy array containing the times that the lightcurve
is to be evaluated for.
KEYWORD INPUTS
** pars - a dictionary containing the keyword arguments.
['tr_type'] a flag that can be set to either 'primary',
'secondary' or 'both'; if it is set to 'both', the
Z-coordinate will be calculated along with the normalised
separation in order to distinguish between primary transits
and secondary eclipses, and to scale the alternating flux
changes accordingly; on the other hand, if it is set to
either 'primary' or 'secondary', the Z-coordinate will not
be calculated and all transit events will have the same
shape and depth depending on which is chosen; the latter
saves time and should almost always be used when modelling
on a per-transit basis.
['T0'] time of periapse passage, which this routine will
force to be the same as the mid-transit time if the orbit
is circular, by setting the argument of periapse to 3pi/2.
['P'] orbital period in same units of time as 'T0'.
['aRs'] semimajor axis in units of stellar radii.
['RpRs'] planetary radius in units of stellar radii.
['incl'] orbital inclination in degrees.
['b'] = aRs*cos(i), which can be provided instead of 'incl'
['ecc'] orbital eccentricity.
['SecDepth'] depth of the secondary eclipse; will be set to
zero if not explicitly specified.
['omega'] argument of periapse in degrees; this must be
explicitly specified if the eccentricity is nonzero; if
the orbit is circular, it will be forced to 3pi/2 regardless
of the input value, to ensure that T0 corresponds to the
time of mid-transit.
['ld'] an optional flag that can be set to either None,
'quad' or 'nonlin' to specify the type of limb
darkening law to be used; if it is not set, then no limb
darkening will be used, and this would also be the case if
it is set to None.
['gam1']+['gam2'] quadratic limb darkening coeffs; required
if the 'ld' flag is set to 'quad'.
['c1']+['c2']+['c3']+['c4'] nonlinear limb darkening coeffs;
required if the 'ld' flag is set to 'nonlin'.
OUTPUT
** F - a numpy array containing the relative flux values of
the model transit lightcurve.
"""
    # Start unpacking parameters:
T0 = pars[ 'T0' ]
P = pars[ 'P' ]
aRs = pars[ 'aRs' ]
RpRs = pars[ 'RpRs' ]
try:
incl_rad = np.deg2rad( pars['incl'] )
except:
try:
incl_rad = np.arccos( pars['b']/aRs )
except:
raise StandardError( 'Must provide at least one of incl or b' )
ecc = pars[ 'ecc' ]
try:
foot = pars[ 'foot' ]
except:
foot = 1.
try:
grad = pars[ 'grad' ]
except:
grad = 0.
try:
SecDepth = pars[ 'SecDepth' ]
except:
SecDepth = 0.
try:
tr_type = pars['tr_type']
except:
tr_type = 'both'
# Following the naming convention of the original
# Mandel & Agol (2002) paper for the limb darkening
# coefficients:
try:
if pars['ld']=='quad':
gam1 = pars[ 'gam1' ]
gam2 = pars[ 'gam2' ]
elif pars['ld']=='nonlin':
c1 = pars[ 'c1' ]
c2 = pars[ 'c2' ]
c3 = pars[ 'c3' ]
c4 = pars[ 'c4' ]
elif pars['ld']==None:
pars['ld'] = 'quad'
pars['gam1'] = 0.
pars['gam2'] = 0.
except:
pars['ld'] = 'quad'
pars['gam1'] = 0.
pars['gam2'] = 0.
# Calculate the mean anomaly:
t = t.flatten()
MeanAnom = ( 2*np.pi/P )*( t - T0 )
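    # (the mean anomaly increases linearly in time and is measured in radians
    # from the time of periapse passage T0)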
# Calculate the normalised separation between the
# planetary and stellar discs:
if ecc != 0.:
omega_rad = pars['omega'] * np.pi/180.
NormSep = keporb.NormSep( MeanAnom, aRs, ecc, omega_rad, incl_rad )
else:
omega_rad = 3.*np.pi/2.
try:
b = pars['b']
except:
b = aRs*np.cos( incl_rad )
NormSep = np.sqrt( ( ( aRs*np.sin( MeanAnom ) )**2. ) \
+ ( ( b*np.cos( MeanAnom ) )**2. ) )
# If we want to model both primary transits and secondary
# eclipses, we need to compute the Z coordinate to determine
# when the planet is in front of the star (Z<0) and behind
# the star (Z>0):
if tr_type=='both':
F = np.ones( len( t ) )
zcoord = keporb.Zcoord( MeanAnom, aRs, ecc, omega_rad, incl_rad )
ixsf = ( zcoord < 0 )
if ixsf.max()==True:
if pars['ld']=='quad':
F[ixsf] = ma02.F_quad( NormSep[ixsf], RpRs, \
pars['gam1'], pars['gam2'] )
elif pars['ld']=='nonlin':
F[ixsf] = ma02.F_nonlin( NormSep[ixsf], RpRs, \
pars['c1'], pars['c2'], \
pars['c3'], pars['c4'] )
else:
pdb.set_trace()
ixsb = ( zcoord >= 0 )
if ixsb.max()==True:
temp = ma02.F_quad( NormSep[ixsb], RpRs, 0.0, 0.0 ) - 1.
F[ixsb] = 1 + temp*SecDepth/( temp.max() - temp.min() ) #/( RpRs**2. )
# If we're only interested in the primary transits then
# we must take stellar limb darkening into account
# while treating the planet as an non-luminous disc:
elif tr_type=='primary':
if pars['ld']=='quad':
F = ma02.F_quad( NormSep, RpRs, \
pars['gam1'], pars['gam2'] )
elif pars['ld']=='nonlin':
F = ma02.F_nonlin( NormSep, RpRs, \
pars['c1'], pars['c2'], \
pars['c3'], pars['c4'] )
else:
print( '\n\n\n{0:s} not recognised as limb darkening type'\
.format( pars['ld'] ) )
pdb.set_trace()
# If we're only interested in the secondary eclipses
# we treat the planet as a uniform disc with no limb
# darkening:
elif tr_type=='secondary':
temp = ma02.F_quad( NormSep, RpRs, 0.0, 0.0 ) - 1.
F = 1 + temp*SecDepth/( temp.max() - temp.min() ) #/( RpRs**2. )
# If requested, re-scale the lightcurve by a linear
# trend before returning the output:
if ( grad!=0. )+( foot!=1. ):
twid = t.max() - t.min()
tmid = t.min() + 0.5*twid
F = F * ( foot + grad*( t - tmid ) )
# NOTE: This will change the absolute value of the
# eclipse depth, but the fractional value of the
# eclipse depth will remain the same.
return F
def ma02_RsMsRpMp( t, **pars ):
"""
Uses the Mandel & Agol (2002) analytic equations to compute
transit lightcurves. This routine takes the mass and radius
for both the star and planet as input parameters. The masses
are used with Kepler's third law to calculate the semimajor
axis. This differs from the ma02_aRs() routine which takes
the semimajor axis directly as an input parameter.
CALLING
    F = transit.ma02_RsMsRpMp( t, **pars )
INPUTS
** t - a numpy array containing the times that the lightcurve
is to be evaluated for.
KEYWORD INPUTS
** pars - a dictionary containing the keyword arguments:
['tr_type'] a flag that can be set to either 'primary',
'secondary' or 'both'; if it is set to 'both', the
Z-coordinate will be calculated along with the normalised
separation in order to distinguish between primary transits
and secondary eclipses, and to scale the alternating flux
changes accordingly; on the other hand, if it is set to
either 'primary' or 'secondary', the Z-coordinate will not
be calculated and all transit events will have the same
shape and depth depending on which is chosen; the latter
saves time and should almost always be used when modelling
on a per-transit basis.
['T0'] time of periapse passage, which this routine will
force to be the same as the mid-transit time if the orbit
is circular, by setting the argument of periapse to 3pi/2.
['P'] orbital period in same units of time as 'T0'.
['Rs'] stellar radius in solar radii.
['Ms'] stellar mass in solar masses.
['Rp'] planetary radius in Jupiter radii.
['Mp'] planetary mass in Jupiter masses.
['incl'] orbital inclination in degrees.
['ecc'] orbital eccentricity.
['SecDepth'] depth of the secondary eclipse; will be set to
zero if not explicitly specified.
['omega'] argument of periapse in degrees; this must be
explicitly specified if the eccentricity is nonzero; if
the orbit is circular, it will be forced to 3pi/2 regardless
of the input value, to ensure that T0 corresponds to the
time of mid-transit.
['ld'] an optional flag that can be set to either None,
'quad' or 'nonlin' to specify the type of limb
darkening law to be used; if it is not set, then no limb
darkening will be used, and this would also be the case if
it is set to None.
['gam1']+['gam2'] quadratic limb darkening coeffs; required
if the 'ld' flag is set to 'quad'.
['c1']+['c2']+['c3']+['c4'] nonlinear limb darkening coeffs;
required if the 'ld' flag is set to 'nonlin'.
OUTPUT
** F - a numpy array containing the relative flux values of
the model transit lightcurve.
"""
    # Start unpacking parameters:
T0 = pars['T0']
P = pars['P']
Rs = pars['Rs']
Ms = pars['Ms']
Rp = pars['Rp']
Mp = pars['Mp']
SecDepth = pars['SecDepth']
incl_rad = pars['incl'] * np.pi/180.
ecc = pars['ecc']
try:
foot = pars[ 'foot' ]
except:
foot = 1.
try:
grad = pars[ 'grad' ]
except:
grad = 0.
try:
SecDepth = pars[ 'SecDepth' ]
except:
SecDepth = 0.
try:
tr_type = pars['tr_type']
except:
tr_type = 'both'
# Convert some of the units:
Rs *= consts.RSun
Ms *= consts.MSun
Rp *= consts.RJup
Mp *= consts.MJup
# Assuming a 2-body Keplerian orbit, use Kepler's
# third law to calculate the semimajor axis:
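    # (i.e. a**3 = G*( Ms + Mp )*( P/( 2*pi ) )**2, with P converted from
    # days to seconds)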
a = np.power( ( ( ( ( P*24.*60.*60./( 2*np.pi ) )**2 ) \
* consts.G * ( Ms + Mp) ) ) , (1./3.) )
aRs = a/Rs
RpRs = Rp/Rs
# Following the naming convention of the original
# Mandel & Agol (2002) paper for the limb darkening
# coefficients:
try:
if pars['ld']=='quad':
gam1 = pars[ 'gam1' ]
gam2 = pars[ 'gam2' ]
elif pars['ld']=='nonlin':
c1 = pars[ 'c1' ]
c2 = pars[ 'c2' ]
c3 = pars[ 'c3' ]
c4 = pars[ 'c4' ]
elif pars['ld']==None:
pars['ld'] = 'quad'
pars['gam1'] = 0.
pars['gam2'] = 0.
except:
pars['ld'] = 'quad'
pars['gam1'] = 0.
pars['gam2'] = 0.
# Calculate the mean anomaly:
MeanAnom = ( 2*np.pi/P )*( t - T0 )
# Calculate the normalised separation between the
# planetary and stellar discs:
if ecc != 0.:
omega_rad = pars['omega'] * np.pi/180.
NormSep = keporb.NormSep( MeanAnom, aRs, ecc, omega_rad, incl_rad )
else:
omega_rad = 3.*np.pi/2.
b = aRs*np.cos( incl_rad )
NormSep = np.sqrt( ( ( aRs*np.sin( MeanAnom ) )**2. ) \
+ ( ( b*np.cos( MeanAnom ) )**2. ) )
# If we want to model both primary transits and secondary
# eclipses, we need to compute the Z coordinate to determine
# when the planet is in front of the star (Z<0) and behind
# the star (Z>0):
if tr_type=='both':
F = np.ones( len( t ) )
zcoord = keporb.Zcoord( MeanAnom, aRs, ecc, omega_rad, incl_rad )
ixsf = ( zcoord < 0 )
if pars['ld']=='quad':
F[ixsf] = ma02.F_quad( NormSep[ixsf], RpRs, \
pars['gam1'], pars['gam2'] )
elif pars['ld']=='nonlin':
F[ixsf] = ma02.F_nonlin( NormSep[ixsf], RpRs, \
pars['c1'], pars['c2'], \
pars['c3'], pars['c4'] )
else:
pdb.set_trace()
ixsb = ( zcoord >= 0 )
temp = ma02.F_quad( NormSep[ixsb], RpRs, 0.0, 0.0 ) - 1.
F[ixsb] = 1 + temp*SecDepth/( temp.max() - temp.min() ) #/( RpRs**2. )
# If we're only interested in the primary transits then
# we must take stellar limb darkening into account
# while treating the planet as an non-luminous disc:
elif tr_type=='primary':
if pars['ld']=='quad':
F = ma02.F_quad( NormSep, RpRs, \
pars['gam1'], pars['gam2'] )
elif pars['ld']=='nonlin':
F = ma02.F_nonlin( NormSep, RpRs, \
pars['c1'], pars['c2'], \
pars['c3'], pars['c4'] )
else:
pdb.set_trace()
# If we're only interested in the secondary eclipses
# we treat the planet as a uniform disc with no limb
# darkening:
elif tr_type=='secondary':
temp = ma02.F_quad( NormSep, RpRs, 0.0, 0.0 ) - 1.
F = 1 + temp*SecDepth/( temp.max() - temp.min() ) #/( RpRs**2. )
# If requested, re-scale the lightcurve by a linear
# trend before returning the output:
if ( grad!=0. )+( foot!=1. ):
twid = t.max() - t.min()
tmid = t.min() + 0.5*twid
F = F * ( foot + grad*( t - tmid ) )
return F
def calc_T0( Ttr, P, ecc, omega, transit='primary' ):
"""
SUMMARY
Computes time of periapse passage given the mid-time of
either the primary transit or the secondary eclipse.
CALLING
T0 = transit.calc_T0( Ttr, P, ecc, omega, transit='primary' )
INPUTS
** Ttr - transit/eclipse mid-time.
** P - orbital period.
** ecc - orbital eccentricity.
** omega - argument of periapse in degrees.
KEYWORD INPUTS
** transit - 'primary' or 'secondary' depending on whether the
mid-time Ttr is for the primary transit or secondary eclipse;
default is 'primary'.
OUTPUT
** T0 - the time of periapse passage.
NOTES:
** Ttr and P must have the same units of time, and the output T0
will also have the same units.
** By default, the longitude of the ascending node (big Omega)
is implicitly taken to be 180 degrees in the calculations.
"""
# Convert omega from degrees to radians:
omega *= np.pi / 180.
# Make sure omega has been provided between 0-2pi:
while omega >= 2*np.pi:
        omega -= 2*np.pi
while omega < 0:
omega += 2*np.pi
# Calculate the true anomaly corresponding to the
# midpoint of the transit/eclipse, and ensure that
# the value lies in the 0-2pi range:
if transit=='primary':
TrueAnom_tr = 3*np.pi/2. - omega
elif transit=='secondary':
TrueAnom_tr = np.pi/2. - omega
else:
pdb.set_trace()
while TrueAnom_tr >= 2*np.pi:
        TrueAnom_tr -= 2*np.pi
while TrueAnom_tr < 0:
TrueAnom_tr += 2*np.pi
# Calculate the value of the eccentric anomaly at the time
# of transit/eclipse:
EccAnom_tr = 2 * np.arctan2( np.sqrt( 1. - ecc )*np.sin( TrueAnom_tr/2. ), \
np.sqrt( 1. + ecc )*np.cos( TrueAnom_tr/2. ) )
# Use the eccentric anomaly at the time of transit/eclipse
# to calculate the mean anomaly:
MeanAnom_tr = EccAnom_tr - ecc*np.sin( EccAnom_tr )
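    # (this is Kepler's equation, M = E - e*sin(E))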
# Convert the mean anomaly to a time elapsed since the
# time of periastron passage:
delt = P * ( MeanAnom_tr/2./np.pi )
# Calculate the time of periastron passage:
T0 = Ttr - delt
return T0
def example():
"""
Simple routine that demonstrates various ways of
computing lightcurves, and plots the results.
"""
# Time values:
t = np.linspace( 0., 7., 10000 )
# Orbit properties:
ecc = 0.0
P = 2.1 # days
incl = 88.2 # degrees
omega = 90.0 # degrees
T_tr = 0.0 # time of transit
T0 = calc_T0( T_tr, P, ecc, omega, transit='primary' )
# Star-planet physical:
Rp = 1.0 # Jupiter radii
Mp = 1.0 # Jupiter masses
Rs = 1.0 # solar radii
Ms = 1.0 # solar masses
# Lightcurve properties:
SecDepth = 1e-3
foot = 1.0
grad = 0.0
# Calculate the semimajor axis using Kepler's
# third law:
a = np.power( ( ( ( ( P*24.*60.*60./( 2*np.pi ) )**2 ) * consts.G \
* ( Ms*consts.MSun + Mp*consts.MJup ) ) ) , (1./3.) )
RpRs = ( Rp*consts.RJup )/( Rs*consts.RSun )
aRs = a / ( Rs*consts.RSun )
# Nonlinear limb darkening coeffs:
c1 = -0.1
c2 = +1.4
c3 = -1.2
c4 = +0.5
# Quadratic limb darkening coeffs:
gam1 = 0.5
gam2 = 0.1
# Quadratic limb darkening + RsMsRpMp parameterisation:
pars_RsMsRpMp_q = { 'T0':T0, 'P':P, 'Ms':Ms, 'Mp':Mp, 'Rs':Rs, \
'Rp':Rp, 'SecDepth':SecDepth, 'incl':incl, \
'ecc':ecc, 'omega':omega, 'gam1':gam1, 'gam2':gam2, \
'ld':'quad', 'foot':foot, 'grad':grad }
F_RsMsRpMp_q = ma02_RsMsRpMp( t, **pars_RsMsRpMp_q )
# Nonlinear limb darkening + RsMsRpMp parameterisation:
pars_RsMsRpMp_nl = { 'T0':T0, 'P':P, 'Ms':Ms, 'Mp':Mp, 'Rs':Rs, \
'Rp':Rp, 'SecDepth':SecDepth, 'incl':incl, \
'ecc':ecc, 'omega':omega, 'c1':c1, 'c2':c2, \
'c3':c3, 'c4':c4, 'ld':'nonlin', \
'foot':foot, 'grad':grad }
F_RsMsRpMp_nl = ma02_RsMsRpMp( t, **pars_RsMsRpMp_nl )
# No limb darkening + RsMsRpMp parameterisation:
pars_RsMsRpMp_n = { 'T0':T0, 'P':P, 'Ms':Ms, 'Mp':Mp, 'Rs':Rs, \
'Rp':Rp, 'SecDepth':SecDepth, 'incl':incl, \
'ecc':ecc, 'omega':omega, 'ld':None, \
'foot':foot, 'grad':grad }
F_RsMsRpMp_n = ma02_RsMsRpMp( t, **pars_RsMsRpMp_n )
# Quadratic limb darkening + aRs parameterisation:
pars_aRs_q = { 'T0':T0, 'P':P, 'aRs':aRs, 'RpRs':RpRs, \
'SecDepth':SecDepth, 'incl':incl, 'ecc':ecc, \
'omega':omega, 'gam1':gam1, 'gam2':gam2,
'ld':'quad', 'foot':foot, 'grad':grad }
F_aRs_q = ma02_aRs( t, **pars_aRs_q )
# Nonlinear limb darkening + aRs parameterisation:
pars_aRs_nl = { 'T0':T0, 'P':P, 'aRs':aRs, 'RpRs':RpRs, \
'SecDepth':SecDepth, 'incl':incl, 'ecc':ecc, \
'omega':omega, 'c1':c1, 'c2':c2, 'c3':c3, 'c4':c4,
'ld':'nonlin', 'foot':foot, 'grad':grad }
F_aRs_nl = ma02_aRs( t, **pars_aRs_nl )
# No limb darkening + aRs parameterisation:
pars_aRs_n = { 'T0':T0, 'P':P, 'aRs':aRs, 'RpRs':RpRs, \
'SecDepth':SecDepth, 'incl':incl, 'ecc':ecc, \
'omega':omega, 'ld':None, \
'foot':foot, 'grad':grad }
F_aRs_n = ma02_aRs( t, **pars_aRs_n )
# Plot the results:
fig = plt.figure()
ax1 = fig.add_subplot( 211 )
ax1.plot( t, F_aRs_n, '--g', lw=1 )
ax1.plot( t, F_aRs_nl, '-m', lw=1 )
ax1.plot( t, F_aRs_q, '-b', lw=1 )
#ax1.set_ylim( [ 1-1.4*(RpRs**2.), 1+0.2*(RpRs**2.) ] )
ax1.set_ylim( [ 1. - 1.4*foot*(RpRs**2.), 1. + 0.2*foot*(RpRs**2.) ] )
ax2 = fig.add_subplot( 212, sharex=ax1 )
ax2.plot( t, F_RsMsRpMp_n, '--g', lw=1 )
ax2.plot( t, F_RsMsRpMp_nl, '-m', lw=1 )
ax2.plot( t, F_RsMsRpMp_q, '-b', lw=1 )
#ax2.set_ylim( [ 1-1.4*(RpRs**2.), 1+0.2*(RpRs**2.) ] )
ax2.set_ylim( [ 1. - 1.4*foot*(RpRs**2.), 1. + 0.2*foot*(RpRs**2.) ] )
ax2.set_xlabel( 'Time' )
    # Discrepancies between the outputs of routines
# using different parameterisations:
print( 'This should be zero --> {0:.10f}'\
.format( ( F_RsMsRpMp_q - F_aRs_q ).max() ) )
print( 'This should be zero --> {0:.10f}'\
.format( ( F_RsMsRpMp_nl - F_aRs_nl ).max() ) )
return None
|
gpl-2.0
|
stylianos-kampakis/scikit-learn
|
sklearn/cluster/birch.py
|
207
|
22706
|
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
    matrix, instead of constructing a sparse matrix for every row, which is
    expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
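    # Each existing subcluster is then assigned to whichever of the two seed
    # subclusters (the mutually farthest pair found above) it lies closer to.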
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
    A CFNode can hold at most branching_factor CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True, in order to retrieve
        the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
        the centroids are just a view of ``init_centroids_``.
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids_ and squared_norm_ as views, so that updating
        # init_centroids_ and init_sq_norm_ is sufficient to keep them
        # in sync.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
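        # -2*c_i.s + ||c_i||^2 orders the existing centroids c_i by squared
        # distance to the new subcluster's centroid s; the constant ||s||^2
        # term is omitted since it does not affect the argmin.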
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
            # The child node had to be split, so redistribute its subclusters
            # between two new nodes and add a new subcluster in the parent
            # subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
            # We do not have enough space, nor is it close enough to any
            # other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
        Child node of the subcluster. Once a given _CFNode is set as the child
        of this _CFSubcluster, it is stored in ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
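        # Squared radius of the tentative merged subcluster, i.e. the mean
        # squared distance of its samples from the new centroid; this equals
        # new_ss / new_n - new_norm, written below in an expanded form.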
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
    centroid closest to the new sample. This is done recursively until it
    ends up at the leaf subcluster of the tree that has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize this loop; reason enough to consider Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
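        # -2*x.c + ||c||^2 equals ||x - c||^2 up to a per-sample constant
        # ||x||^2, which does not change the argmin, so the row norms of X
        # are never computed.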
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
|
bsd-3-clause
|
B3AU/waveTree
|
sklearn/preprocessing/tests/test_data.py
|
5
|
25313
|
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.sparsefuncs import mean_variance_axis0
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import add_dummy_feature
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_scaler_1d():
"""Test scaling of dataset along single axis"""
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
def test_scaler_2d_arrays():
"""Test scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
"""Check min max scaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(
X_csr_scaled.astype(np.float))
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
"""Check that StandardScaler.fit does not change input"""
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_warning_scaling_integers():
"""Check warning when scaling integer data"""
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
assert_warns(UserWarning, StandardScaler().fit, X)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
assert_warns(UserWarning, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize_errors():
"""Check that invalid arguments yield ValueError"""
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
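# Illustrative sketch (not part of the original test module): the thresholding
# rule exercised by test_binarizer, i.e. values strictly greater than
# `threshold` map to 1 and everything else to 0. The helper name
# `_binarize_sketch` is hypothetical.
def _binarize_sketch(X, threshold=0.0):
    X = np.asarray(X, dtype=float)
    return (X > threshold).astype(float)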
def test_center_kernel():
"""Test that KernelCenterer is equivalent to StandardScaler
in feature space"""
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
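# Illustrative sketch (not part of the original test module): the closed-form
# centering of a fit-time kernel matrix that the test above checks against
# explicit feature-space centering. With one_n = ones((n, n)) / n,
#     K_centered = K - one_n.K - K.one_n + one_n.K.one_n
# The helper name `_center_kernel_sketch` is hypothetical.
def _center_kernel_sketch(K):
    n = K.shape[0]
    one_n = np.ones((n, n)) / n
    return K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)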
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder():
"""Test OneHotEncoder's fit and transform."""
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
    # max value in X is 3; n_values given explicitly as 4
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
    # test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
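# Illustrative sketch (not part of the original test module): how the
# `feature_indices_` boundaries asserted in test_one_hot_encoder can be
# derived when the number of values is discovered automatically (max value
# plus one per column, then a cumulative sum). The helper name
# `_feature_indices_sketch` is hypothetical.
def _feature_indices_sketch(X):
    X = np.asarray(X)
    n_values = X.max(axis=0) + 1  # e.g. [4, 3, 2] for X = [[3, 2, 1], [0, 1, 1]]
    return np.cumsum(np.hstack([[0], n_values]))  # -> [0, 4, 7, 9]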
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
|
bsd-3-clause
|
krahman/BuildingMachineLearningSystemsWithPython
|
ch09/02_ceps_based_classifier.py
|
1
|
3582
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from collections import defaultdict
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from sklearn.cross_validation import ShuffleSplit
from sklearn.metrics import confusion_matrix
from utils import plot_roc, plot_confusion_matrix, GENRE_LIST, TEST_DIR
from ceps import read_ceps
genre_list = GENRE_LIST
def train_model(clf_factory, X, Y, name, plot=False):
labels = np.unique(Y)
cv = ShuffleSplit(
n=len(X), n_iter=1, test_size=0.3, indices=True, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = defaultdict(list)
precisions, recalls, thresholds = defaultdict(
list), defaultdict(list), defaultdict(list)
roc_scores = defaultdict(list)
tprs = defaultdict(list)
fprs = defaultdict(list)
clfs = [] # just to later get the median
cms = []
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf = clf_factory()
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
scores.append(test_score)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
cms.append(cm)
for label in labels:
y_label_test = np.asarray(y_test == label, dtype=int)
proba = clf.predict_proba(X_test)
proba_label = proba[:, label]
precision, recall, pr_thresholds = precision_recall_curve(
y_label_test, proba_label)
pr_scores[label].append(auc(recall, precision))
precisions[label].append(precision)
recalls[label].append(recall)
thresholds[label].append(pr_thresholds)
fpr, tpr, roc_thresholds = roc_curve(y_label_test, proba_label)
roc_scores[label].append(auc(fpr, tpr))
tprs[label].append(tpr)
fprs[label].append(fpr)
if plot:
for label in labels:
print("Plotting %s"%genre_list[label])
scores_to_sort = roc_scores[label]
median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
desc = "%s %s" % (name, genre_list[label])
plot_roc(roc_scores[label][median], desc, tprs[label][median],
fprs[label][median], label='%s vs rest' % genre_list[label])
    all_pr_scores = np.asarray(list(pr_scores.values())).flatten()
summary = (np.mean(scores), np.std(scores),
np.mean(all_pr_scores), np.std(all_pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors), np.asarray(cms)
def create_model():
from sklearn.linear_model.logistic import LogisticRegression
clf = LogisticRegression()
return clf
if __name__ == "__main__":
X, y = read_ceps(genre_list)
train_avg, test_avg, cms = train_model(
create_model, X, y, "Log Reg CEPS", plot=True)
cm_avg = np.mean(cms, axis=0)
cm_norm = cm_avg / np.sum(cm_avg, axis=0)
plot_confusion_matrix(cm_norm, genre_list, "ceps",
"Confusion matrix of a CEPS based classifier")
|
mit
|
huaxz1986/git_book
|
chapters/KNN_Dimension_Reduction/pca.py
|
1
|
2016
|
# -*- coding: utf-8 -*-
"""
kNN and dimensionality reduction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PCA
:copyright: (c) 2016 by the huaxz1986.
:license: lgpl-3.0, see LICENSE for more details.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets,decomposition
def load_data():
'''
    Load the data used for dimensionality reduction.
    :return: a tuple of the training samples and their labels
    '''
    iris=datasets.load_iris() # use the iris dataset bundled with scikit-learn
return iris.data,iris.target
def test_PCA(*data):
'''
    Test the usage of PCA.
    :param data: variadic arguments; a tuple whose elements are, in order, the training samples and their labels
    :return: None
    '''
    X,y=data
    pca=decomposition.PCA(n_components=None) # use the default n_components
pca.fit(X)
print('explained variance ratio : %s'% str(pca.explained_variance_ratio_))
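# Illustrative sketch (not part of the original example): the quantity printed
# by test_PCA, computed directly from the eigenvalues of the sample covariance
# matrix instead of through decomposition.PCA. The helper name
# `_explained_variance_ratio_sketch` is hypothetical.
def _explained_variance_ratio_sketch(X):
    X_centered = X - X.mean(axis=0)
    cov = np.cov(X_centered, rowvar=False)
    eigvals = np.linalg.eigvalsh(cov)[::-1]  # eigenvalues in descending order
    return eigvals / eigvals.sum()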
def plot_PCA(*data):
'''
    Plot the samples after PCA has reduced them to two dimensions.
    :param data: variadic arguments; a tuple whose elements are, in order, the training samples and their labels
    :return: None
    '''
    X,y=data
    pca=decomposition.PCA(n_components=2) # target dimensionality is 2
    pca.fit(X)
    X_r=pca.transform(X) # project the original dataset down to two dimensions
    ###### plot the two-dimensional data ########
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
colors=((1,0,0),(0,1,0),(0,0,1),(0.5,0.5,0),(0,0.5,0.5),(0.5,0,0.5),
        (0.4,0.6,0),(0.6,0.4,0),(0,0.6,0.4),(0.5,0.3,0.2),) # color palette; samples with different labels get different colors
for label ,color in zip( np.unique(y),colors):
position=y==label
ax.scatter(X_r[position,0],X_r[position,1],label="target= %d"%label,color=color)
ax.set_xlabel("X[0]")
ax.set_ylabel("Y[0]")
ax.legend(loc="best")
ax.set_title("PCA")
plt.show()
if __name__=='__main__':
    X,y=load_data() # generate the dataset used for dimensionality reduction
    test_PCA(X,y) # call test_PCA
    #plot_PCA(X,y) # call plot_PCA
|
gpl-3.0
|
yl565/statsmodels
|
statsmodels/datasets/star98/data.py
|
5
|
3935
|
"""Star98 Educational Testing dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = "Star98 Educational Dataset"
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Math scores for 303 student with 10 explanatory factors"""
DESCRLONG = """
This data is on the California education policy and outcomes (STAR program
results for 1998). The data measured standardized testing by the California
Department of Education that required evaluation of 2nd - 11th grade students
by the Stanford 9 test on a variety of subjects. This dataset is at
the level of the unified school district and consists of 303 cases. The
binary response variable represents the number of 9th graders scoring
over the national median value on the mathematics exam.
The data used in this example is only a subset of the original source.
"""
NOTE = """::
Number of Observations - 303 (counties in California).
Number of Variables - 13 and 8 interaction terms.
Definition of variables names::
NABOVE - Total number of students above the national median for the
math section.
NBELOW - Total number of students below the national median for the
math section.
LOWINC - Percentage of low income students
PERASIAN - Percentage of Asian student
PERBLACK - Percentage of black students
PERHISP - Percentage of Hispanic students
PERMINTE - Percentage of minority teachers
AVYRSEXP - Sum of teachers' years in educational service divided by the
number of teachers.
AVSALK - Total salary budget including benefits divided by the number
of full-time teachers (in thousands)
PERSPENK - Per-pupil spending (in thousands)
PTRATIO - Pupil-teacher ratio.
PCTAF - Percentage of students taking UC/CSU prep courses
PCTCHRT - Percentage of charter schools
PCTYRRND - Percentage of year-round schools
The below variables are interaction terms of the variables defined
above.
PERMINTE_AVYRSEXP
    PERMINTE_AVSAL
AVYRSEXP_AVSAL
PERSPEN_PTRATIO
PERSPEN_PCTAF
PTRATIO_PCTAF
    PERMINTE_AVYRSEXP_AVSAL
PERSPEN_PTRATIO_PCTAF
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the star98 data and returns a Dataset class instance.
Returns
-------
Load instance:
        a class of the data with array attributes 'endog' and 'exog'
"""
data = _get_data()
return du.process_recarray(data, endog_idx=[0, 1], dtype=float)
def load_pandas():
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=['NABOVE', 'NBELOW'],
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
names = ["NABOVE","NBELOW","LOWINC","PERASIAN","PERBLACK","PERHISP",
"PERMINTE","AVYRSEXP","AVSALK","PERSPENK","PTRATIO","PCTAF",
"PCTCHRT","PCTYRRND","PERMINTE_AVYRSEXP","PERMINTE_AVSAL",
"AVYRSEXP_AVSAL","PERSPEN_PTRATIO","PERSPEN_PCTAF","PTRATIO_PCTAF",
"PERMINTE_AVYRSEXP_AVSAL","PERSPEN_PTRATIO_PCTAF"]
with open(filepath + '/star98.csv',"rb") as f:
data = recfromtxt(f, delimiter=",",
names=names, skip_header=1, dtype=float)
# careful now
nabove = data['NABOVE'].copy()
nbelow = data['NBELOW'].copy()
data['NABOVE'] = nbelow # successes
data['NBELOW'] = nabove - nbelow # now failures
return data
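# Illustrative sketch (not part of the original dataset module), assuming the
# usual statsmodels entry points: the Star98 data is typically modelled with a
# binomial GLM, with the (NABOVE, NBELOW) columns playing the role of
# (successes, failures) after the swap performed in _get_data above. The
# helper name `_example_binomial_glm_sketch` is hypothetical.
def _example_binomial_glm_sketch():
    import statsmodels.api as sm
    data = sm.datasets.star98.load()
    exog = sm.add_constant(data.exog, prepend=False)
    return sm.GLM(data.endog, exog, family=sm.families.Binomial()).fit()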
|
bsd-3-clause
|
bhargav/scikit-learn
|
sklearn/utils/arpack.py
|
265
|
64837
|
"""
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver.
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
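# Illustrative sketch (not part of the original module): how a caller can
# recover the partially converged spectrum when ArpackNoConvergence is raised.
# The helper name `_partial_spectrum_sketch` is hypothetical and assumes the
# `eigsh` wrapper defined further down in this module.
def _partial_spectrum_sketch(A, k=6, maxiter=50):
    try:
        return eigsh(A, k=k, maxiter=maxiter)
    except ArpackNoConvergence as exc:
        # The converged eigenpairs are attached to the exception object.
        return exc.eigenvalues, exc.eigenvectors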
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
# we got less or equal as many eigenvalues we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
    using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
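# Illustrative sketch (not part of the original module): the shift-invert
# operator that get_OPinv_matvec builds for a dense real A with M = None and
# real sigma, written out directly with an LU factorisation of (A - sigma*I).
# Eigenvalues w of A map to w' = 1 / (w - sigma) under this operator. The
# helper name `_dense_shift_invert_sketch` is hypothetical.
def _dense_shift_invert_sketch(A, sigma):
    shifted = np.array(A, dtype=float)  # work on a copy of A
    shifted.flat[::shifted.shape[1] + 1] -= sigma  # subtract sigma from the diagonal
    lu_piv = lu_factor(shifted)
    return lambda x: lu_solve(lu_piv, x)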
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
return_eigenvectors : boolean, default True
Whether to return the eigenvectors along with the eigenvalues.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
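    A hedged sketch of the shift-invert options described under ``sigma`` and
    ``mode`` above (not part of the original docstring; the diagonal test
    matrix is purely illustrative):
    >>> from scipy.sparse import diags
    >>> A = diags(np.arange(1, 14, dtype=float))
    >>> vals, vecs = eigsh(A, k=3, sigma=2.4)  # eigenvalues closest to 2.4
    >>> sorted(vals)  # doctest: +SKIP
    [1.0, 2.0, 3.0]
    >>> vals, vecs = eigsh(A, k=3, sigma=2.4, mode='cayley')  # doctest: +SKIP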
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
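    A hedged usage sketch (not part of the original docstring; the random
    test matrix is purely illustrative):
    >>> A = np.random.rand(20, 10)
    >>> u, s, vh = _svds(A, k=3)
    >>> u.shape, s.shape, vh.shape  # doctest: +SKIP
    ((20, 3), (3,), (3, 10))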
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
# check if backport is actually needed:
if LooseVersion(scipy.version.version) >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
|
bsd-3-clause
|
uqyge/combustionML
|
FPV_ANN_pureResNet/FPV_resnet_fullycoupled.py
|
1
|
5137
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
from keras.models import Model
from keras.layers import Dense, Input
from keras.callbacks import ModelCheckpoint
from resBlock import res_block_org
from data_reader import read_hdf_data, read_hdf_data_psi
from writeANNProperties import writeANNProperties
from keras import backend as K
from keras.models import load_model
import ast
##########################
# Parameters
n_neuron = 500
branches = 3
scale = 3
batch_size = 1024*4
epochs = 2000
vsplit = 0.1
batch_norm = False
# define the type of scaler: MinMax or Standard
scaler = 'Standard' # 'Standard' 'MinMax'
##########################
labels = []
with open('GRI_species_order_reduced', 'r') as f:
species = f.readlines()
for line in species:
# remove linebreak which is the last character of the string
current_place = line[:-1]
# add item to the list
labels.append(current_place)
# append other fields: heatrelease, T, PVs
#labels.append('heatRelease')
labels.append('T')
labels.append('PVs')
# tabulate psi, mu, alpha
labels.append('psi')
labels.append('mu')
labels.append('alpha')
# DO NOT CHANGE THIS ORDER!!
input_features=['f','zeta','pv']
# read in the data
X, y, df, in_scaler, out_scaler = read_hdf_data_psi('./tables_of_fgm.H5',key='of_tables',
in_labels=input_features, labels = labels,scaler=scaler)
# split into train and test data
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.01)
# %%
print('set up ANN')
# ANN parameters
dim_input = X_train.shape[1]
dim_label = y_train.shape[1]
# This returns a tensor
inputs = Input(shape=(dim_input,))#,name='input_1')
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(n_neuron, activation='relu')(inputs)
#
# x = res_block(x, scale, n_neuron, stage=1, block='a', bn=batch_norm,branches=branches)
# x = res_block(x, scale, n_neuron, stage=1, block='b', bn=batch_norm,branches=branches)
# x = res_block(x, scale, n_neuron, stage=1, block='c', bn=batch_norm,branches=branches)
x = res_block_org(x, n_neuron, stage=1, block='a', bn=batch_norm)
x = res_block_org(x, n_neuron, stage=1, block='b', bn=batch_norm)
x = res_block_org(x, n_neuron, stage=1, block='c', bn=batch_norm)
#x = res_block(x, n_neuron, stage=1, block='d', bn=batch_norm)
predictions = Dense(dim_label, activation='linear')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
# get the model summary
model.summary()
# checkpoint (save the best model based on validation loss)
filepath = "./tmp/weights.best.cntk.hdf5"
checkpoint = ModelCheckpoint(filepath,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min',
period=10)
callbacks_list = [checkpoint]
# fit the model
history = model.fit(
X_train, y_train,
epochs=epochs,
batch_size=batch_size,
validation_split=vsplit,
verbose=2,
callbacks=callbacks_list,
shuffle=True)
#%%
model.load_weights("./tmp/weights.best.cntk.hdf5")
# cntk.combine(model.outputs).save('mayerTest.dnn')
# # %%
# ref = df.loc[df['p'] == 40]
# x_test = in_scaler.transform(ref[['p', 'he']])
predict_val = model.predict(X_test)
X_test_df = pd.DataFrame(in_scaler.inverse_transform(X_test),columns=input_features)
y_test_df = pd.DataFrame(out_scaler.inverse_transform(y_test),columns=labels)
sp='PVs'
# loss
fig = plt.figure()
plt.semilogy(history.history['loss'])
if vsplit:
plt.semilogy(history.history['val_loss'])
plt.title('mse')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.savefig('./exported/Loss_%s_%s_%i.eps' % (sp,scaler,n_neuron),format='eps')
plt.show(block=False)
predict_df = pd.DataFrame(out_scaler.inverse_transform(predict_val), columns=labels)
plt.figure()
plt.title('Error of %s ' % sp)
plt.plot((y_test_df[sp] - predict_df[sp]) / y_test_df[sp])
plt.title(sp)
plt.savefig('./exported/Error_%s_%s_%i.eps' % (sp,scaler,n_neuron),format='eps')
plt.show(block=False)
plt.figure()
plt.scatter(predict_df[sp],y_test_df[sp],s=1)
plt.title('R2 for '+sp)
plt.savefig('./exported/R2_%s_%s_%i.eps' % (sp,scaler,n_neuron),format='eps')
plt.show(block=False)
# %%
a=(y_test_df[sp] - predict_df[sp]) / y_test_df[sp]
test_data=pd.concat([X_test_df,y_test_df],axis=1)
pred_data=pd.concat([X_test_df,predict_df],axis=1)
test_data.to_hdf('sim_check.H5',key='test')
pred_data.to_hdf('sim_check.H5',key='pred')
# Save model
sess = K.get_session()
saver = tf.train.Saver(tf.global_variables())
saver.save(sess, './exported/my_model')
model.save('FPV_ANN_tabulated_%s.H5' % scaler)
# write the OpenFOAM ANNProperties file
writeANNProperties(in_scaler,out_scaler,scaler)
# Convert the Keras model to a TensorFlow .pb graph with k2tf.py, for example:
#run -i k2tf.py --input_model='FPV_ANN_tabulated_Standard.H5' --output_model='exported/FPV_ANN_tabulated_Standard.pb'
|
mit
|
femtotrader/arctic-updater
|
arctic_updater/utils.py
|
1
|
1869
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import logging
logger = logging.getLogger(__name__)
import requests
import requests_cache
import pandas as pd
from collections import OrderedDict
def get_session(expire_after, cache_name='cache'):
"""
Returns a `requests.Session` or a `requests_cache.CachedSession`
Parameters
----------
expire_after : `str`
cache expiration delay
'-1' : no cache
'0' : no expiration
'00:15:00.0' : expiration delay
    cache_name : `str`
Name of cache file
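    A hedged usage sketch (not part of the original docstring; the cache
    name 'prices' is purely illustrative)::
        session = get_session('-1')                       # no cache, plain requests.Session
        session = get_session('0', cache_name='prices')   # cached, never expires
        session = get_session('00:15:00.0')               # cached entries expire after 15 minutes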
"""
if expire_after=='-1':
expire_after = None
        logger.debug("expire_after==-1 no cache")
session = requests.Session()
else:
if expire_after=='0':
expire_after = 0
logger.debug("Installing cache '%s.sqlite' without expiration" % cache_name)
else:
expire_after = pd.to_timedelta(expire_after, unit='s')
logger.debug("Installing cache '%s.sqlite' with expire_after=%s (d days hh:mm:ss)" % (cache_name, expire_after))
session = requests_cache.CachedSession(\
cache_name=cache_name, expire_after=expire_after)
return session
def tablename_to_dict(tablename):
"""
>>> tablename_to_dict('OHLCV_D_PDR_YAHOO_AAPL')
OrderedDict([('store', 'OHLCV'), ('freq', 'D'), ('updater', 'PDR'), ('source', 'YAHOO'), ('symbol', 'AAPL')])
"""
lst = tablename.split('_')
store, freq, updater_shortname, source, symbol = lst
d = OrderedDict([
('store', store),
('freq', freq),
('updater', updater_shortname),
('source', source),
('symbol', symbol)
])
return d
def main():
import doctest
doctest.testmod()
if __name__ == '__main__':
main()
|
isc
|
wkuling/sklearn_pycon2014
|
notebooks/fig_code/ML_flow_chart.py
|
61
|
4970
|
"""
Tutorial Diagrams
-----------------
This script plots the flow-charts used in the scikit-learn tutorials.
"""
import numpy as np
import pylab as pl
from matplotlib.patches import Circle, Rectangle, Polygon, Arrow, FancyArrow
def create_base(box_bg = '#CCCCCC',
arrow1 = '#88CCFF',
arrow2 = '#88FF88',
supervised=True):
fig = pl.figure(figsize=(9, 6), facecolor='w')
ax = pl.axes((0, 0, 1, 1),
xticks=[], yticks=[], frameon=False)
ax.set_xlim(0, 9)
ax.set_ylim(0, 6)
patches = [Rectangle((0.3, 3.6), 1.5, 1.8, zorder=1, fc=box_bg),
Rectangle((0.5, 3.8), 1.5, 1.8, zorder=2, fc=box_bg),
Rectangle((0.7, 4.0), 1.5, 1.8, zorder=3, fc=box_bg),
Rectangle((2.9, 3.6), 0.2, 1.8, fc=box_bg),
Rectangle((3.1, 3.8), 0.2, 1.8, fc=box_bg),
Rectangle((3.3, 4.0), 0.2, 1.8, fc=box_bg),
Rectangle((0.3, 0.2), 1.5, 1.8, fc=box_bg),
Rectangle((2.9, 0.2), 0.2, 1.8, fc=box_bg),
Circle((5.5, 3.5), 1.0, fc=box_bg),
Polygon([[5.5, 1.7],
[6.1, 1.1],
[5.5, 0.5],
[4.9, 1.1]], fc=box_bg),
FancyArrow(2.3, 4.6, 0.35, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.75, 4.2, 0.5, -0.2, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(5.5, 2.4, 0, -0.4, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(2.0, 1.1, 0.5, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.3, 1.1, 1.3, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(6.2, 1.1, 0.8, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2)]
if supervised:
patches += [Rectangle((0.3, 2.4), 1.5, 0.5, zorder=1, fc=box_bg),
Rectangle((0.5, 2.6), 1.5, 0.5, zorder=2, fc=box_bg),
Rectangle((0.7, 2.8), 1.5, 0.5, zorder=3, fc=box_bg),
FancyArrow(2.3, 2.9, 2.0, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
Rectangle((7.3, 0.85), 1.5, 0.5, fc=box_bg)]
else:
patches += [Rectangle((7.3, 0.2), 1.5, 1.8, fc=box_bg)]
for p in patches:
ax.add_patch(p)
pl.text(1.45, 4.9, "Training\nText,\nDocuments,\nImages,\netc.",
ha='center', va='center', fontsize=14)
pl.text(3.6, 4.9, "Feature\nVectors",
ha='left', va='center', fontsize=14)
pl.text(5.5, 3.5, "Machine\nLearning\nAlgorithm",
ha='center', va='center', fontsize=14)
pl.text(1.05, 1.1, "New Text,\nDocument,\nImage,\netc.",
ha='center', va='center', fontsize=14)
pl.text(3.3, 1.7, "Feature\nVector",
ha='left', va='center', fontsize=14)
pl.text(5.5, 1.1, "Predictive\nModel",
ha='center', va='center', fontsize=12)
if supervised:
pl.text(1.45, 3.05, "Labels",
ha='center', va='center', fontsize=14)
pl.text(8.05, 1.1, "Expected\nLabel",
ha='center', va='center', fontsize=14)
pl.text(8.8, 5.8, "Supervised Learning Model",
ha='right', va='top', fontsize=18)
else:
pl.text(8.05, 1.1,
"Likelihood\nor Cluster ID\nor Better\nRepresentation",
ha='center', va='center', fontsize=12)
pl.text(8.8, 5.8, "Unsupervised Learning Model",
ha='right', va='top', fontsize=18)
def plot_supervised_chart(annotate=False):
create_base(supervised=True)
if annotate:
fontdict = dict(color='r', weight='bold', size=14)
pl.text(1.9, 4.55, 'X = vec.fit_transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(3.7, 3.2, 'clf.fit(X, y)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(1.7, 1.5, 'X_new = vec.transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(6.1, 1.5, 'y_new = clf.predict(X_new)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
def plot_unsupervised_chart():
create_base(supervised=False)
if __name__ == '__main__':
plot_supervised_chart(False)
plot_supervised_chart(True)
plot_unsupervised_chart()
pl.show()
|
bsd-3-clause
|
akionakamura/scikit-learn
|
examples/linear_model/plot_ransac.py
|
250
|
1673
|
"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
|
bsd-3-clause
|
Achuth17/scikit-learn
|
examples/cluster/plot_dbscan.py
|
346
|
2479
|
# -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
bsd-3-clause
|
harisbal/pandas
|
pandas/tests/indexes/test_frozen.py
|
2
|
3493
|
import warnings
import numpy as np
from pandas.compat import u
from pandas.core.indexes.frozen import FrozenList, FrozenNDArray
from pandas.tests.test_base import CheckImmutable, CheckStringMixin
from pandas.util import testing as tm
class TestFrozenList(CheckImmutable, CheckStringMixin):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
def setup_method(self, _):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
def test_add(self):
result = self.container + (1, 2, 3)
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
result = (1, 2, 3) + self.container
expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
def test_iadd(self):
q = r = self.container
q += [5]
self.check_result(q, self.lst + [5])
# Other shouldn't be mutated.
self.check_result(r, self.lst)
def test_union(self):
result = self.container.union((1, 2, 3))
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
def test_difference(self):
result = self.container.difference([2])
expected = FrozenList([1, 3, 4, 5])
self.check_result(result, expected)
def test_difference_dupe(self):
result = FrozenList([1, 2, 3, 2]).difference([2])
expected = FrozenList([1, 3])
self.check_result(result, expected)
class TestFrozenNDArray(CheckImmutable, CheckStringMixin):
mutable_methods = ('put', 'itemset', 'fill')
def setup_method(self, _):
self.lst = [3, 5, 7, -2]
self.klass = FrozenNDArray
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", FutureWarning)
self.container = FrozenNDArray(self.lst)
self.unicode_container = FrozenNDArray(
[u("\u05d0"), u("\u05d1"), "c"])
def test_constructor_warns(self):
# see gh-9031
with tm.assert_produces_warning(FutureWarning):
FrozenNDArray([1, 2, 3])
def test_shallow_copying(self):
original = self.container.copy()
assert isinstance(self.container.view(), FrozenNDArray)
assert not isinstance(self.container.view(np.ndarray), FrozenNDArray)
assert self.container.view() is not self.container
tm.assert_numpy_array_equal(self.container, original)
# Shallow copy should be the same too
assert isinstance(self.container._shallow_copy(), FrozenNDArray)
# setting should not be allowed
def testit(container):
container[0] = 16
self.check_mutable_error(testit, self.container)
def test_values(self):
original = self.container.view(np.ndarray).copy()
n = original[0] + 15
vals = self.container.values()
tm.assert_numpy_array_equal(original, vals)
assert original is not vals
vals[0] = n
assert isinstance(self.container, FrozenNDArray)
tm.assert_numpy_array_equal(self.container.values(), original)
assert vals[0] == n
def test_searchsorted(self):
expected = 2
assert self.container.searchsorted(7) == expected
with tm.assert_produces_warning(FutureWarning):
assert self.container.searchsorted(v=7) == expected
|
bsd-3-clause
|
nikitasingh981/scikit-learn
|
examples/cluster/plot_face_segmentation.py
|
71
|
2839
|
"""
===================================================
Segmenting the picture of a raccoon face in regions
===================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
    raise SkipTest("Skipping because the SciPy version is earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
# load the raccoon face as a numpy array
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(face)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25
#############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels, random_state=1)
t1 = time.time()
labels = labels.reshape(face.shape)
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS))])
plt.xticks(())
plt.yticks(())
title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
print(title)
plt.title(title)
plt.show()
|
bsd-3-clause
|
ltiao/scikit-learn
|
benchmarks/bench_sparsify.py
|
323
|
3372
|
"""
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0  # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
|
bsd-3-clause
|
bencebeky/etudes
|
hue1.py
|
1
|
1440
|
import numpy
import cv2
from com.dtmilano.android.adb.adbclient import AdbClient
from scipy.ndimage import label
from matplotlib import pyplot
pyplot.interactive(True)
screen = AdbClient(serialno='.*').takeSnapshot()
#screen = cv2.imread('screen.png')
rgb = numpy.array(screen)[::4,::4,:3]
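# Pack each pixel's RGB triple into a single 24-bit integer so identical
# colors can be grouped with numpy.unique below.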
rgb2 = numpy.array(rgb[:,:,0], dtype=numpy.uint32)
rgb2 = rgb2 + 256 * rgb[:,:,1] + 256 * 256 * rgb[:,:,2]
colors = numpy.unique(rgb2.flatten())
labels = numpy.zeros(rgb.shape[:2])
label = 1
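# Treat a color as a candidate tile only if it covers roughly 0.2%-2% of the
# (downsampled) screen area; smaller or larger regions are skipped below.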
thresholdmin = numpy.prod(rgb.shape[:2])/500
thresholdmax = 10 * thresholdmin
tilecolors = []
for color in colors:
mask = rgb2 == color
if (numpy.sum(mask) < thresholdmin or numpy.sum(mask) > thresholdmax):
continue
labels[mask] = label
label += 1
tilecolors.append(rgb[mask][0])
label -= 1
tilecolors = numpy.array(tilecolors)
figure, axarr = pyplot.subplots(1, 2)
spaceplot = axarr[0]
spaceplot.set_xlim(0, rgb.shape[1])
spaceplot.set_ylim(-rgb.shape[0], 0)
xarray, yarray = numpy.meshgrid(numpy.arange(rgb.shape[1]), numpy.arange(rgb.shape[0]))
for i in range(label):
mask = labels == i+1
spaceplot.text(numpy.mean(xarray[mask]), -numpy.mean(yarray[mask]), str(i+1))
colorplot = axarr[1]
xcolor = tilecolors[:,0]
ycolor = -tilecolors[:,2]
colorplot.set_xlim(numpy.min(xcolor)-10, numpy.max(xcolor)+10)
colorplot.set_ylim(numpy.max(ycolor)+10, numpy.min(ycolor)-10)
for i in range(label):
colorplot.text(xcolor[i], ycolor[i], str(i+1))
|
gpl-3.0
|
fluxcapacitor/source.ml
|
jupyterhub.ml/notebooks/train_deploy/zz_under_construction/zz_old/TensorFlow/SkFlow_DEPRECATED/mnist.py
|
6
|
3005
|
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example showcases how simple it is to build image classification networks.
It follows description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from sklearn import metrics
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import skflow
### Download and load MNIST data.
mnist = input_data.read_data_sets('MNIST_data')
### Linear classifier.
classifier = skflow.TensorFlowLinearClassifier(
n_classes=10, batch_size=100, steps=1000, learning_rate=0.01)
classifier.fit(mnist.train.images, mnist.train.labels)
score = metrics.accuracy_score(mnist.test.labels, classifier.predict(mnist.test.images))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
def conv_model(X, y):
# reshape X to 4d tensor with 2nd and 3rd dimensions being image width and height
# final dimension being the number of color channels
X = tf.reshape(X, [-1, 28, 28, 1])
# first conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = skflow.ops.conv2d(X, n_filters=32, filter_shape=[5, 5],
bias=True, activation=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# second conv layer will compute 64 features for each 5x5 patch
with tf.variable_scope('conv_layer2'):
h_conv2 = skflow.ops.conv2d(h_pool1, n_filters=64, filter_shape=[5, 5],
bias=True, activation=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# densely connected layer with 1024 neurons
h_fc1 = skflow.ops.dnn(h_pool2_flat, [1024], activation=tf.nn.relu, keep_prob=0.5)
return skflow.models.logistic_regression(h_fc1, y)
# Training and predicting
classifier = skflow.TensorFlowEstimator(
model_fn=conv_model, n_classes=10, batch_size=100, steps=20000,
learning_rate=0.001)
classifier.fit(mnist.train.images, mnist.train.labels)
score = metrics.accuracy_score(mnist.test.labels, classifier.predict(mnist.test.images))
print('Accuracy: {0:f}'.format(score))
|
apache-2.0
|
sieben/makesense
|
makesense/plot.py
|
1
|
13078
|
# -*- coding: utf-8 -*-
"""
Module building up the graphs using matplotlib
"""
import pdb
from csv import DictReader
from collections import defaultdict
from os.path import join as pj
import logging
import matplotlib
# matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
log = logging.getLogger("plot")
def plot_iotlab_energy(folder):
df = pd.read_csv(pj(folder, "energy.csv"))
df.set_index("time", inplace=True)
targets = df.mote_id.unique()
for target in targets:
ax = df[df.mote_id == target].voltage.plot()
ax.set_ylabel("Voltage [V]")
ax.set_xlabel("Time [s]")
fig = plt.gcf()
# fig.set_size_inches(18.5, 10.5)
fig.savefig(pj(folder, 'voltage_%s.png' % target))
plt.close('all')
def overhead(folder):
"""
Plot the overhead (RPL, ACK,...).
This graph measures the amount of bytes sent by nodes that are not
application oriented (UDP, ping or CoAP) therefore we can see the amount
of bytes transmitted just to keep the network alive. This packets are
usually RPL and ACK packets.)
"""
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title('RPL traffic by time')
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('RPL traffic (bytes)')
with open(pj(folder, "results", "io.csv")) as io_csv_f:
reader = DictReader(io_csv_f)
time, overhead_bytes = [], []
for row in reader:
time.append(float(row["bin_end"]))
overhead_bytes.append(float(row["total_bytes"])
- float(row["udp_bytes"])
- float(row["ping_bytes"])
- float(row["coap_bytes"]))
ax1.plot(time, overhead_bytes)
    img_path = pj(folder, "results", "overhead.png")
    fig.savefig(img_path)
    plt.close('all')
def dashboard(folder):
output_folder = pj(folder, "results", "dashboard")
if not os.path.exists(output_folder):
os.makedirs(output_folder)
depth_df = pd.read_csv(pj(folder, "results", "depth.csv"))
targets = depth_df.node
for target in targets:
df = pd.read_csv(pj(output_folder, "res_%d.csv" % int(target)))
df.drop("tx", 1).drop("rx", 1).plot(kind="bar")
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
fig.savefig(pj(output_folder, 'dashboard_%d.png' % target))
plt.close('all')
def protocol_repartition_depth(folder):
output_folder = pj(folder, "results", "protocol_repartition")
if not os.path.exists(output_folder):
os.makedirs(output_folder)
depth_df = pd.read_csv(pj(folder, "results", "depth.csv"))
depth_df.set_index("node", inplace=True)
pcap_df = pd.read_csv(pj(folder, "results", "pcap_relooked.csv"))
pcap_df = pcap_df.join(depth_df, on="mac_src")
res = pd.DataFrame()
res["rpl"] = pcap_df[pcap_df.icmpv6_type == "rpl"].groupby("depth").sum().length
res["udp"] = pcap_df[pcap_df.icmpv6_type == "udp"].groupby("depth").sum().length
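    # RATE is the assumed radio bit rate in bit/s (250 kbit/s is the nominal
    # IEEE 802.15.4 rate); the lines below convert byte counts to on-air time.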
RATE = 250000
res["rpl"] = 8.0 * res["rpl"] / RATE
res["udp"] = 8.0 * res["udp"] / RATE
ax = res.plot(kind="bar", stacked=True)
ax.set_ylabel('Time [s]')
ax.set_xlabel("Depth")
ax.set_xticklabels(res.index.map(int), rotation=0)
fig = plt.gcf()
plt.tight_layout()
# fig.set_size_inches(18.5, 10.5)
fig.savefig(pj(output_folder, 'protocol_repartition_depth.png'))
plt.close('all')
def protocol_repartition_aggregated(folder, BIN=25):
output_folder = pj(folder, "results", "protocol_repartition")
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# depth_df = pd.read_csv(pj(folder, "results", "depth.csv"))
# depth_df.set_index("node", inplace=True)
pcap_df = pd.read_csv(pj(folder, "results", "pcap_relooked.csv"))
pcap_df["bin_start"] = BIN * (pcap_df.time // BIN)
res = pd.DataFrame()
RATE = 250000
res["udp"] = pcap_df[(pcap_df.icmpv6_type == "udp") & (pcap_df.time < 200)].groupby("bin_start").sum().length
res["rpl"] = pcap_df[(pcap_df.icmpv6_type == "rpl") & (pcap_df.time < 200)].groupby("bin_start").sum().length
res["rpl"] = 8.0 * res["rpl"] / RATE
res["udp"] = 8.0 * res["udp"] / RATE
ax = res[["rpl", "udp"]].plot(kind="bar", stacked=True)
ax.set_ylabel('Time [s]')
ax.set_xlabel("Time [s]")
ax.set_ylim(0, 4.5)
ax.set_xticklabels(res.index.map(int), rotation=0)
fig = plt.gcf()
plt.tight_layout()
# fig.set_size_inches(18.5, 10.5)
fig.savefig(pj(output_folder, 'protocol_repartition_aggregated.png'))
plt.close('all')
def protocol_repartition(folder):
"""
Representation of protocol repartition (stacked histogram)
Include UDP, CoAP, ping, RPL, other...
This graph represents through time the repartition of the protocol
usage. We obtain this graph by analyzing through the PCAP produced by
our simulator. As we can see the amount of packets produced by the
routing protocol is very high at the beginning of the simulation then
come down to a relatively stable rate. The trickle mechanism in RPL
cause the periodic reconstruction of the route.
"""
output_folder = pj(folder, "results", "protocol_repartition")
if not os.path.exists(output_folder):
os.makedirs(output_folder)
depth_df = pd.read_csv(pj(folder, "results", "depth.csv"))
targets = depth_df.node.unique()
for target in targets:
df = pd.read_csv(pj(output_folder, 'protocol_repartition_%d.csv' % target))
df = df[df["bin_start"] < 200]
df.set_index("bin_start", inplace=True)
df["udp"] = df["udp"] - df["forwarding"]
ax = df[["rpl", "udp", "forwarding"]].plot(kind="bar", stacked=True)
ax.set_ylabel('Time [s]')
ax.set_xlabel("Time [s]")
ax.set_ylim([0.0, df.max().max() * 1.5])
ax.set_xticklabels(df.index.map(int), rotation=0)
# pdb.set_trace()
fig = plt.gcf()
plt.legend(ncol=3)
plt.tight_layout()
# fig.set_size_inches(18.5, 10.5)
fig.savefig(pj(output_folder, "protocol_repartition_%d.png" % target))
plt.close('all')
def pdr(folder, BIN=25):
output_folder = pj(folder, "results", "pdr")
if not os.path.exists(output_folder):
os.makedirs(output_folder)
depth_df = pd.read_csv(pj(folder, "results", "depth.csv"))
# depth_df.set_index("node", inplace=True)
targets = depth_df.node.unique()
for target in targets:
pdr_df = pd.read_csv(pj(output_folder, "pdr_%d.csv" % target))
pdr_df["bin_start"] = BIN * (pdr_df.departure_time // BIN)
pdr_df.groupby("bin_start").count()
res = pdr_df.groupby("bin_start").count().arrival_time / pdr_df.groupby("bin_start").count().departure_time
res.plot(kind='bar')
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
fig.savefig(pj(output_folder, 'pdr_%d.png' % target))
plt.close('all')
def pdr_depth(folder):
output_folder = pj(folder, "results", "pdr")
if not os.path.exists(output_folder):
os.makedirs(output_folder)
pdr_df = pd.read_csv(pj(output_folder, "pdr_depth.csv"))
pdr_df.set_index("depth", inplace=True)
ax = pdr_df["avg"].plot(kind="bar", yerr=pdr_df["std"])
ax.set_ylabel('avg PDR')
ax.set_xlabel("Depth")
ax.set_xticklabels(pdr_df.index, rotation=0)
fig = plt.gcf()
plt.tight_layout()
# fig.set_size_inches(18.5, 10.5)
fig.savefig(pj(output_folder, 'pdr_depth.png'))
plt.close('all')
def strobes(folder):
output_folder = pj(folder, "results", "strobes")
if not os.path.exists(output_folder):
os.makedirs(output_folder)
depth_df = pd.read_csv(pj(folder, "results", "depth.csv"))
# depth_df.set_index("node", inplace=True)
targets = depth_df.node.unique()
for target in targets:
strobes_df = pd.read_csv(pj(folder, "results", "strobes", "strobes_%d.csv" % target))
strobes_df.set_index("bin_start", inplace=True)
strobes_df.plot(kind='bar')
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
fig.savefig(pj(folder, "results", "strobes", 'strobes_%d.png' % target),
)
plt.close('all')
def strobes_depth(folder):
latexify()
plt.xticks(rotation=90)
output_folder = pj(folder, "results", "strobes")
if not os.path.exists(output_folder):
os.makedirs(output_folder)
strobes_df = pd.read_csv(pj(output_folder, "strobes_depth.csv"))
strobes_df.set_index("depth", inplace=True)
ax = strobes_df["avg"].plot(kind="bar", yerr=strobes_df["std"])
ax.set_xticklabels(strobes_df.index, rotation=0)
ax.set_ylabel('Packet strobing')
ax.set_xlabel("Depth")
fig = plt.gcf()
plt.tight_layout()
fig.savefig(pj(output_folder, 'strobes_depth.png'))
plt.close('all')
def energy(self):
"""
Powertracker analyze the energy consumption by using the amount of
time that every node spend in a transmitting reception, interference
or on mode.
"""
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title('Energy measured by Powertracker')
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('Energy (Ah)')
results = defaultdict(dict)
with open(self.powertracker_csv) as f:
reader = DictReader(f)
for row in reader:
results[row["mote_id"]].setdefault(
"time", []).append(row["monitored_time"])
results[row["mote_id"]].setdefault(
"energy", []).append(row["energy_consumed"])
for node, values in results.items():
ax1.plot(values["time"], values["energy"], label=node)
ax1.legend(loc="upper left")
img_path = PJ(self.img_dir, "energy.png")
fig.savefig(img_path)
plt.close('all')
def energy_depth(self):
"""
Energy used by depth of a node in the RPL tree.
Energy used by nodes that are a fixed depth from the root.
"""
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.set_title('Energy by depth')
ax1.set_ylabel('Energy (Ah)')
ax1.set_xlabel('Depth')
with open(PJ(self.result_dir, "depth_energy.csv")) as f:
reader = DictReader(f)
depth, mean, std = [], [], []
for row in reader:
depth.append(row["depth"])
mean.append(float(row["mean_energy"]))
std.append(float(row["std_energy"]))
n_groups = len(depth)
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.4
error_config = {'ecolor': '0.3'}
ax1.bar(index, mean, bar_width,
alpha=opacity,
yerr=std,
error_kw=error_config,
label='Depth')
plt.xticks(index + bar_width, depth)
plt.legend()
plt.tight_layout()
img_path = PJ(self.img_dir, "energy_depth.png")
plt.savefig(img_path)
from math import sqrt
import matplotlib
def latexify(fig_width=None, fig_height=None, columns=1):
"""Set up matplotlib's RC params for LaTeX plotting.
Call this before plotting a figure.
Parameters
----------
fig_width : float, optional, inches
fig_height : float, optional, inches
columns : {1, 2}
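    A hedged usage sketch (not part of the original docstring; the output
    file name is purely illustrative)::
        latexify(columns=1)
        fig, ax = plt.subplots()
        ax.plot([0, 1], [0, 1])
        format_axes(ax)
        fig.savefig('example.pdf')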
"""
# code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
# Width and max height in inches for IEEE journals taken from
# computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
assert(columns in [1, 2])
if fig_width is None:
fig_width = 3.39 if columns == 1 else 6.9 # width in inches
if fig_height is None:
golden_mean = (sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_height = fig_width*golden_mean # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
        print("WARNING: fig_height too large: " + str(fig_height) +
              " so will reduce to " + str(MAX_HEIGHT_INCHES) + " inches.")
fig_height = MAX_HEIGHT_INCHES
params = {'backend': 'ps',
'text.latex.preamble': ["\\usepackage{gensymb}"],
'axes.labelsize': 9, # fontsize for x and y labels (was 12)
'axes.titlesize': 9,
'font.size': 9, # was 12
'legend.fontsize': 9, # was 12
'xtick.labelsize': 9,
'ytick.labelsize': 9,
'text.usetex': True,
'figure.figsize': [fig_width, fig_height],
'font.family': 'serif'
}
matplotlib.rcParams.update(params)
def format_axes(ax):
SPINE_COLOR = 'gray'
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
for spine in ['left', 'bottom']:
ax.spines[spine].set_color(SPINE_COLOR)
ax.spines[spine].set_linewidth(0.5)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_tick_params(direction='out', color=SPINE_COLOR)
return ax
|
apache-2.0
|
hlin117/scikit-learn
|
examples/manifold/plot_compare_methods.py
|
52
|
3878
|
"""
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
wkfwkf/statsmodels
|
statsmodels/tools/tests/test_pca.py
|
25
|
13934
|
from __future__ import print_function, division
from unittest import TestCase
import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_raises
from numpy.testing.decorators import skipif
import pandas as pd
try:
import matplotlib.pyplot as plt
missing_matplotlib = False
except ImportError:
missing_matplotlib = True
from statsmodels.tools.pca import PCA
from statsmodels.tools.tests.results.datamlw import data, princomp1, princomp2
from statsmodels.compat.numpy import nanmean
DECIMAL_5 = .00001
class TestPCA(TestCase):
@classmethod
def setUpClass(cls):
rs = np.random.RandomState()
rs.seed(1234)
k = 3
n = 100
t = 200
lam = 2
norm_rng = rs.standard_normal
e = norm_rng((t, n))
f = norm_rng((t, k))
b = rs.standard_gamma(lam, size=(k, n)) / lam
cls.x = f.dot(b) + e
cls.x_copy = cls.x + 0.0
cls.rs = rs
k = 3
n = 300
t = 200
lam = 2
norm_rng = rs.standard_normal
e = norm_rng((t, n))
f = norm_rng((t, k))
b = rs.standard_gamma(lam, size=(k, n)) / lam
cls.x_wide = f.dot(b) + e
@skipif(missing_matplotlib)
def test_smoke_plot_and_repr(self):
pc = PCA(self.x)
fig = pc.plot_scree()
fig = pc.plot_scree(ncomp=10)
fig = pc.plot_scree(log_scale=False)
fig = pc.plot_scree(cumulative=True)
fig = pc.plot_rsquare()
fig = pc.plot_rsquare(ncomp=5)
# Additional smoke test
pc.__repr__()
pc = PCA(self.x, standardize=False)
pc.__repr__()
pc = PCA(self.x, standardize=False, demean=False)
pc.__repr__()
# Check data for no changes
assert_equal(self.x, pc.data)
def test_eig_svd_equiv(self):
"""
Test leading components since the tail end can differ
"""
pc_eig = PCA(self.x)
pc_svd = PCA(self.x, method='svd')
assert_allclose(pc_eig.projection, pc_svd.projection)
assert_allclose(np.abs(pc_eig.factors[:, :2]),
np.abs(pc_svd.factors[:, :2]))
assert_allclose(np.abs(pc_eig.coeff[:2, :]),
np.abs(pc_svd.coeff[:2, :]))
assert_allclose(pc_eig.eigenvals,
pc_svd.eigenvals)
assert_allclose(np.abs(pc_eig.eigenvecs[:, :2]),
np.abs(pc_svd.eigenvecs[:, :2]))
pc_svd = PCA(self.x, method='svd', ncomp=2)
pc_nipals = PCA(self.x, method='nipals', ncomp=2)
assert_allclose(np.abs(pc_nipals.factors),
np.abs(pc_svd.factors),
atol=DECIMAL_5)
assert_allclose(np.abs(pc_nipals.coeff),
np.abs(pc_svd.coeff),
atol=DECIMAL_5)
assert_allclose(pc_nipals.eigenvals,
pc_svd.eigenvals,
atol=DECIMAL_5)
assert_allclose(np.abs(pc_nipals.eigenvecs),
np.abs(pc_svd.eigenvecs),
atol=DECIMAL_5)
# Check data for no changes
assert_equal(self.x, pc_svd.data)
# Check data for no changes
assert_equal(self.x, pc_eig.data)
# Check data for no changes
assert_equal(self.x, pc_nipals.data)
def test_options(self):
pc = PCA(self.x)
pc_no_norm = PCA(self.x, normalize=False)
assert_allclose(pc.factors.dot(pc.coeff),
pc_no_norm.factors.dot(pc_no_norm.coeff))
princomp = pc.factors
assert_allclose(princomp.T.dot(princomp), np.eye(100), atol=1e-5)
weights = pc_no_norm.coeff
assert_allclose(weights.T.dot(weights), np.eye(100), atol=1e-5)
pc_10 = PCA(self.x, ncomp=10)
assert_allclose(pc.factors[:, :10], pc_10.factors)
assert_allclose(pc.coeff[:10, :], pc_10.coeff)
assert_allclose(pc.rsquare[:(10 + 1)], pc_10.rsquare)
assert_allclose(pc.eigenvals[:10], pc_10.eigenvals)
assert_allclose(pc.eigenvecs[:, :10], pc_10.eigenvecs)
pc = PCA(self.x, standardize=False, normalize=False)
mu = self.x.mean(0)
xdm = self.x - mu
xpx = xdm.T.dot(xdm)
val, vec = np.linalg.eigh(xpx)
ind = np.argsort(val)
ind = ind[::-1]
val = val[ind]
vec = vec[:, ind]
assert_allclose(xdm, pc.transformed_data)
assert_allclose(val, pc.eigenvals)
assert_allclose(np.abs(vec), np.abs(pc.eigenvecs))
assert_allclose(np.abs(pc.factors), np.abs(xdm.dot(vec)))
assert_allclose(pc.projection, xdm + mu)
pc = PCA(self.x, standardize=False, demean=False, normalize=False)
x = self.x
xpx = x.T.dot(x)
val, vec = np.linalg.eigh(xpx)
ind = np.argsort(val)
ind = ind[::-1]
val = val[ind]
vec = vec[:, ind]
assert_allclose(x, pc.transformed_data)
assert_allclose(val, pc.eigenvals)
assert_allclose(np.abs(vec), np.abs(pc.eigenvecs))
assert_allclose(np.abs(pc.factors), np.abs(x.dot(vec)))
def test_against_reference(self):
"""
Test against MATLAB, which by default demeans but does not standardize
"""
x = data.xo / 1000.0
pc = PCA(x, normalize=False, standardize=False)
ref = princomp1
assert_allclose(np.abs(pc.factors), np.abs(ref.factors))
assert_allclose(pc.factors.dot(pc.coeff) + x.mean(0), x)
assert_allclose(np.abs(pc.coeff), np.abs(ref.coef.T))
assert_allclose(pc.factors.dot(pc.coeff),
ref.factors.dot(ref.coef.T))
pc = PCA(x[:20], normalize=False, standardize=False)
mu = x[:20].mean(0)
ref = princomp2
assert_allclose(np.abs(pc.factors), np.abs(ref.factors))
assert_allclose(pc.factors.dot(pc.coeff) + mu, x[:20])
assert_allclose(np.abs(pc.coeff), np.abs(ref.coef.T))
assert_allclose(pc.factors.dot(pc.coeff),
ref.factors.dot(ref.coef.T))
def test_warnings_and_errors(self):
with warnings.catch_warnings(record=True) as w:
pc = PCA(self.x, ncomp=300)
assert_equal(len(w), 1)
with warnings.catch_warnings(record=True) as w:
rs = self.rs
x = rs.standard_normal((200, 1)) * np.ones(200)
pc = PCA(x, method='eig')
assert_equal(len(w), 1)
assert_raises(ValueError, PCA, self.x, method='unknown')
assert_raises(ValueError, PCA, self.x, missing='unknown')
assert_raises(ValueError, PCA, self.x, tol=2.0)
assert_raises(ValueError, PCA, np.nan * np.ones((200,100)), tol=2.0)
@skipif(missing_matplotlib)
def test_pandas(self):
pc = PCA(pd.DataFrame(self.x))
pc1 = PCA(self.x)
assert_equal(pc.factors.values, pc1.factors)
fig = pc.plot_scree()
fig = pc.plot_scree(ncomp=10)
fig = pc.plot_scree(log_scale=False)
fig = pc.plot_rsquare()
fig = pc.plot_rsquare(ncomp=5)
proj = pc.project(2)
PCA(pd.DataFrame(self.x), ncomp=4, gls=True)
PCA(pd.DataFrame(self.x), ncomp=4, standardize=False)
def test_gls_and_weights(self):
assert_raises(ValueError, PCA, self.x, gls=True)
assert_raises(ValueError, PCA, self.x, weights=np.array([1.0, 1.0]))
# Pre-standardize to make comparison simple
x = (self.x - self.x.mean(0))
x = x / (x ** 2.0).mean(0)
pc_gls = PCA(x, ncomp=1, standardize=False, demean=False, gls=True)
pc = PCA(x, ncomp=1, standardize=False, demean=False)
errors = x - pc.projection
var = (errors ** 2.0).mean(0)
weights = 1.0 / var
weights = weights / np.sqrt((weights ** 2.0).mean())
assert_allclose(weights, pc_gls.weights)
assert_equal(x, pc_gls.data)
assert_equal(x, pc.data)
pc_weights = PCA(x, ncomp=1, standardize=False, demean=False, weights=weights)
assert_allclose(weights, pc_weights.weights)
assert_allclose(np.abs(pc_weights.factors), np.abs(pc_gls.factors))
def test_wide(self):
pc = PCA(self.x_wide)
assert_equal(pc.factors.shape[1], self.x_wide.shape[0])
assert_equal(pc.eigenvecs.shape[1], min(np.array(self.x_wide.shape)))
pc = PCA(pd.DataFrame(self.x_wide))
assert_equal(pc.factors.shape[1], self.x_wide.shape[0])
assert_equal(pc.eigenvecs.shape[1], min(np.array(self.x_wide.shape)))
def test_projection(self):
pc = PCA(self.x, ncomp=5)
mu = self.x.mean(0)
demean_x = self.x - mu
coef = np.linalg.pinv(pc.factors).dot(demean_x)
direct = pc.factors.dot(coef)
assert_allclose(pc.projection, direct + mu)
pc = PCA(self.x, standardize=False, ncomp=5)
coef = np.linalg.pinv(pc.factors).dot(demean_x)
direct = pc.factors.dot(coef)
assert_allclose(pc.projection, direct + mu)
pc = PCA(self.x, standardize=False, demean=False, ncomp=5)
coef = np.linalg.pinv(pc.factors).dot(self.x)
direct = pc.factors.dot(coef)
assert_allclose(pc.projection, direct)
pc = PCA(self.x, ncomp=5, gls=True)
mu = self.x.mean(0)
demean_x = self.x - mu
coef = np.linalg.pinv(pc.factors).dot(demean_x)
direct = pc.factors.dot(coef)
assert_allclose(pc.projection, direct + mu)
pc = PCA(self.x, standardize=False, ncomp=5)
coef = np.linalg.pinv(pc.factors).dot(demean_x)
direct = pc.factors.dot(coef)
assert_allclose(pc.projection, direct + mu)
pc = PCA(self.x, standardize=False, demean=False, ncomp=5, gls=True)
coef = np.linalg.pinv(pc.factors).dot(self.x)
direct = pc.factors.dot(coef)
assert_allclose(pc.projection, direct)
# Test error for too many factors
project = pc.project
assert_raises(ValueError, project, 6)
def test_replace_missing(self):
x = self.x.copy()
x[::5, ::7] = np.nan
pc = PCA(x, missing='drop-row')
x_dropped_row = x[np.logical_not(np.any(np.isnan(x), 1))]
pc_dropped = PCA(x_dropped_row)
assert_equal(pc.projection, pc_dropped.projection)
assert_equal(x, pc.data)
pc = PCA(x, missing='drop-col')
x_dropped_col = x[:, np.logical_not(np.any(np.isnan(x), 0))]
pc_dropped = PCA(x_dropped_col)
assert_equal(pc.projection, pc_dropped.projection)
assert_equal(x, pc.data)
pc = PCA(x, missing='drop-min')
if x_dropped_row.size > x_dropped_col.size:
x_dropped_min = x_dropped_row
else:
x_dropped_min = x_dropped_col
pc_dropped = PCA(x_dropped_min)
assert_equal(pc.projection, pc_dropped.projection)
assert_equal(x, pc.data)
pc = PCA(x, ncomp=3, missing='fill-em')
missing = np.isnan(x)
mu = nanmean(x, axis=0)
errors = x - mu
sigma = np.sqrt(nanmean(errors ** 2, axis=0))
x_std = errors / sigma
x_std[missing] = 0.0
last = x_std[missing]
delta = 1.0
count = 0
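        # EM-style fill: project the standardized data onto 3 components, refill the missing
        # cells from the projection, and repeat until the relative change drops below 5e-8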
while delta > 5e-8:
pc_temp = PCA(x_std, ncomp=3, standardize=False, demean=False)
x_std[missing] = pc_temp.projection[missing]
current = x_std[missing]
diff = current - last
delta = np.sqrt(np.sum(diff ** 2)) / np.sqrt(np.sum(current ** 2))
last = current
count += 1
x = self.x + 0.0
projection = pc_temp.projection * sigma + mu
x[missing] = projection[missing]
assert_allclose(pc._adjusted_data, x)
# Check data for no changes
assert_equal(self.x, self.x_copy)
x = self.x
pc = PCA(x)
pc_dropped = PCA(x, missing='drop-row')
assert_allclose(pc.projection, pc_dropped.projection, atol=DECIMAL_5)
pc_dropped = PCA(x, missing='drop-col')
assert_allclose(pc.projection, pc_dropped.projection, atol=DECIMAL_5)
pc_dropped = PCA(x, missing='drop-min')
assert_allclose(pc.projection, pc_dropped.projection, atol=DECIMAL_5)
pc = PCA(x, ncomp=3)
pc_dropped = PCA(x, ncomp=3, missing='fill-em')
assert_allclose(pc.projection, pc_dropped.projection, atol=DECIMAL_5)
        # All values missing: every missing-data strategy should raise ValueError
x = self.x.copy()
x[:, :] = np.nan
assert_raises(ValueError, PCA, x, missing='drop-row')
assert_raises(ValueError, PCA, x, missing='drop-col')
assert_raises(ValueError, PCA, x, missing='drop-min')
assert_raises(ValueError, PCA, x, missing='fill-em')
def test_rsquare(self):
x = self.x + 0.0
mu = x.mean(0)
x_demean = x - mu
std = np.std(x, 0)
x_std = x_demean / std
pc = PCA(self.x)
nvar = x.shape[1]
rsquare = np.zeros(nvar + 1)
tss = np.sum(x_std ** 2)
for i in range(nvar + 1):
errors = x_std - pc.project(i, transform=False, unweight=False)
rsquare[i] = 1.0 - np.sum(errors ** 2) / tss
assert_allclose(rsquare, pc.rsquare)
pc = PCA(self.x, standardize=False)
tss = np.sum(x_demean ** 2)
for i in range(nvar + 1):
errors = x_demean - pc.project(i, transform=False, unweight=False)
rsquare[i] = 1.0 - np.sum(errors ** 2) / tss
assert_allclose(rsquare, pc.rsquare)
pc = PCA(self.x, standardize=False, demean=False)
tss = np.sum(x ** 2)
for i in range(nvar + 1):
errors = x - pc.project(i, transform=False, unweight=False)
rsquare[i] = 1.0 - np.sum(errors ** 2) / tss
assert_allclose(rsquare, pc.rsquare)
|
bsd-3-clause
|
alejandro-mc/trees
|
boxPlot.py
|
1
|
2299
|
import glob
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
__stats__ = []
def gatherStats(filenames,normalized=False):
global __stats__
#for each file open the file add stats for each line and step
for filename in filenames:
#check for norm files and load values to list
normsfile_name = filename + '.norms'
if os.path.isfile(normsfile_name) and normalized:
def norm_stream():
with open(normsfile_name,'r') as nrmfile:
for line in nrmfile:
trimmed = line[0:-1]
norms = list(map(lambda x : float(x) ,trimmed.split(',')))
for i in range(len(norms)):
yield norms[i]
else:
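            # no .norms file, or normalization disabled: yield 1 so distances pass through unchanged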
def norm_stream():
while True:
yield 1
        # create the norm stream once per file so norms are consumed in order
        norm_values = norm_stream()
        with open(filename,'r') as sprfile:
            for line in sprfile:
                trimmed = line[0:-1]
                distances = list(map(lambda x : float(x) ,trimmed.split(',')))
                for i in range(len(distances)):
                    #add distance data to stats
                    #for now we just add to the list
                    #later will contain min max and histogram to handle
                    #large amounts of data efficiently
                    if i < len(__stats__):
                        __stats__[i].append(distances[i] / next(norm_values))
                    else:
                        __stats__.append([distances[i] / next(norm_values)])
def firstN(n):
return __stats__[0:n]
def plotWalks():
plt.boxplot(__stats__,manage_xticks=False)
plt.show()
if __name__=='__main__':
if len(sys.argv)<3:
print ("Too few arguments!!")
print ("Usage: [-n] <prefix> <no. leaves>")
sys.exit(-1)
normalized = False
if len(sys.argv) == 4:
normalized = sys.argv.pop(1) == '-n'
spr_files = glob.glob( sys.argv[1] + "_" + sys.argv[2] + "_*")
spr_files = list(filter(lambda x: '.norms' not in x,spr_files))
for i in spr_files:
print("gathering data from: " + i)
gatherStats(spr_files,normalized)
#do domething with the stats
plotWalks()
|
mit
|
wasit7/book_pae
|
pae/forcast/src/sqlite/student_sqlite.py
|
1
|
1047
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 22:32:19 2016
@author: Administrator
"""
#import sqlite3
#
#conn = sqlite3.connect('student.sqlite')
#print "Opened database successfully";
#
#conn.execute('''CREATE TABLE Student
# (Sub_id NOT NULL,
# Sub_name TEXT NOT NULL,
# description TEXT,
# credit INT);''')
#print "Table created successfully";
#
#conn.close()
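# load the course CSV and append course id, name and credit to the Student table in student.sqlite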
import pandas as pd
import sqlite3 as sql
df_file = pd.read_csv('../data/CS_table_No2_No4_new.csv',delimiter=";", skip_blank_lines = True,
error_bad_lines=False,encoding='utf8')
df = {'Sub_id':df_file['COURSEID'],'Sub_name':df_file['COURSENAME'],'credit':df_file['CREDIT']}
df_a = pd.DataFrame(df)
con = sql.connect("student.sqlite")
#df = pd.DataFrame({'TestData': [1, 2, 3, 4, 5, 6, 7, 8, 9]}, dtype='float')
# pandas.io.sql.write_frame was removed; DataFrame.to_sql is the supported replacement
df_a.to_sql('Student', con, if_exists="append", index=False)
con.close()
|
mit
|
vzantedeschi/L3SVMs
|
cross_validation.py
|
1
|
2236
|
import time
import statistics
import numpy as np
from sklearn.model_selection import KFold
from src.l3svms import *
from src.utils import *
args = get_args(__file__,False)
TRAIN = args.train_file
LAND = args.nb_landmarks # default 10
CLUS = args.nb_clusters # default 1
NORM = args.norm # default False
LIN = args.linear # default True
PCA_BOOL = args.pca # default False
ITER = args.nb_iterations # default 1
VERB = args.verbose # default False
YPOS = args.y_pos # default 0
CV = args.nb_cv # default 5
verboseprint = print if VERB else lambda *a, **k: None
verboseprint("{}-fold cross-validation on {}: {} clusters, {} landmarks".format(CV,TRAIN,CLUS,LAND))
if LIN:
verboseprint("linear kernel")
else:
verboseprint("rbf kernel")
if NORM:
verboseprint("normalized dataset")
else:
verboseprint("scaled data")
t1 = time.time()
# load dataset
try:
Y,X = load_sparse_dataset(TRAIN,norm=NORM,y_pos=YPOS)
except Exception:
Y,X = load_dense_dataset(TRAIN,norm=NORM,y_pos=YPOS)
Y = np.asarray(Y)
t2 = time.time()
verboseprint("dataset loading time:",t2-t1,"s")
if PCA_BOOL:
    if LAND > X.shape[1]:
raise Exception("When using PCA, the nb landmarks must be at most the nb of features")
verboseprint("landmarks = principal components")
else:
verboseprint("random landmarks")
verboseprint("--------------------\n")
cross_acc_list = []
cross_time_list = []
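# repeat the full cross-validation ITER times with a different shuffle seed and average per-fold results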
for it in range(ITER):
splitter = KFold(n_splits=CV,shuffle=True,random_state=it)
splitter.get_n_splits(X)
acc_list,time_list = [],[]
for train_index,test_index in splitter.split(X):
train_x,test_x = X[train_index],X[test_index]
train_y,test_y = Y[train_index].tolist(),Y[test_index].tolist()
acc,time = learning(train_x,train_y,test_x,test_y,verboseprint,CLUS,PCA_BOOL,LIN,LAND)
acc_list.append(acc)
time_list.append(time)
cross_time_list.append(statistics.mean(time_list))
cross_acc_list.append(statistics.mean(acc_list))
print("Mean accuracy (%), mean stdev (%), mean time (s) over {} iterations:".format(ITER))
try:
    print(statistics.mean(cross_acc_list),statistics.stdev(cross_acc_list),statistics.mean(cross_time_list))
except statistics.StatisticsError:
    # a single iteration has no standard deviation
    print(cross_acc_list[0],0.,cross_time_list[0])
|
mit
|
ccauet/scikit-optimize
|
skopt/tests/test_gp_opt.py
|
1
|
2443
|
from itertools import product
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_less
import pytest
from skopt import gp_minimize
from skopt.benchmarks import bench1
from skopt.benchmarks import bench2
from skopt.benchmarks import bench3
from skopt.benchmarks import bench4
from skopt.benchmarks import branin
from skopt.learning import GaussianProcessRegressor
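# shared helper: run gp_minimize on a benchmark and require the minimum found to be within `margin` of y_opt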
def check_minimize(func, y_opt, bounds, acq_optimizer, acq_func,
margin, n_calls):
r = gp_minimize(func, bounds, acq_optimizer=acq_optimizer,
acq_func=acq_func,
n_calls=n_calls, random_state=1,
noise=1e-10)
assert_less(r.fun, y_opt + margin)
SEARCH_AND_ACQ = list(product(["sampling", "lbfgs"], ["LCB", "EI"]))
@pytest.mark.slow_test
@pytest.mark.parametrize("search, acq", SEARCH_AND_ACQ)
def test_gp_minimize_bench1(search, acq):
check_minimize(bench1, 0.,
[(-2.0, 2.0)], search, acq, 0.05, 50)
@pytest.mark.slow_test
@pytest.mark.parametrize("search, acq", SEARCH_AND_ACQ)
def test_gp_minimize_bench2(search, acq):
check_minimize(bench2, -5,
[(-6.0, 6.0)], search, acq, 0.05, 75)
@pytest.mark.slow_test
@pytest.mark.parametrize("search, acq", SEARCH_AND_ACQ)
def test_gp_minimize_bench3(search, acq):
check_minimize(bench3, -0.9,
[(-2.0, 2.0)], search, acq, 0.05, 50)
@pytest.mark.fast_test
@pytest.mark.parametrize("search, acq", SEARCH_AND_ACQ)
def test_gp_minimize_bench4(search, acq):
check_minimize(bench4, 0.0,
[("-2", "-1", "0", "1", "2")], search, acq, 0.05, 10)
@pytest.mark.fast_test
def test_n_jobs():
r_single = gp_minimize(bench3, [(-2.0, 2.0)], acq_optimizer="lbfgs",
acq_func="EI", n_calls=2, n_random_starts=1,
random_state=1, noise=1e-10)
r_double = gp_minimize(bench3, [(-2.0, 2.0)], acq_optimizer="lbfgs",
acq_func="EI", n_calls=2, n_random_starts=1,
random_state=1, noise=1e-10, n_jobs=2)
assert_array_equal(r_single.x_iters, r_double.x_iters)
@pytest.mark.fast_test
def test_gpr_default():
"""Smoke test that gp_minimize does not fail for default values."""
gpr = GaussianProcessRegressor()
res = gp_minimize(
branin, ((-5.0, 10.0), (0.0, 15.0)), n_random_starts=1, n_calls=2)
|
bsd-3-clause
|
adpozuelo/Master
|
RC/PEC2/ba.py
|
1
|
3491
|
## RC - UOC - URV - PEC2
## [email protected]
## Barabási & Albert (BA)
## run with 'python3 ba.py'
import networkx as nx
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import scipy as sc
import random
import math
random.seed(1)
def create_network(n, m):
m0 = 20
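    # seed network: a complete graph on the first m0 nodes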
G = nx.Graph()
for ni in range(1, m0 + 1):
G.add_node(ni)
for nj in range(1, ni):
if nj != ni:
G.add_edge(ni, nj)
nconnected = m0 + 1
gsize = G.size(weight = 'weight')
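    # growth phase: each new node ni adds m edges, accepting an existing node nj
    # with probability proportional to its degree (preferential attachment)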
for ni in range(m0 + 1, n + 1):
for mi in range(1, m + 1):
attached = False
while not attached:
for nj in range(1, nconnected):
if ni != nj and not G.has_edge(ni, nj):
p = G.degree(nj, weight = 'weight') / gsize
if random.uniform(0, 1) < p:
G.add_edge(ni, nj)
attached = True
gsize += 1
break
nconnected += 1
nx.draw_networkx(G, node_size = 4, with_labels = False)
plt.title('n = ' + str(n) + ', m = ' + str(m))
filename = 'ba_n' + str(n) + '_m' + str(m) + '_net.png'
plt.savefig(filename)
# plt.show()
plt.clf()
histo = nx.degree_histogram(G)
total = sum(histo)
norm_histo = np.divide(histo, total)
length = len(norm_histo)
kn = np.arange(length)
knm = np.add(kn, m)
plt.plot(kn, norm_histo, 'r-', label = 'empirical')
    # theoretical BA degree distribution: a power law P(k) ~ k^(-3)
    power_law = np.empty(length)
    for k in range(0, length):
        power_law[k] = (k + 1) ** (-3)
    total = sum(power_law)
    norm_power_law = np.divide(power_law, total)
    plt.plot(knm, norm_power_law, 'b-', label = 'power law k^(-3)')
plt.title('n = ' + str(n) + ', m = ' + str(m))
    plt.xlabel('Degree k')
    plt.ylabel('Fraction of nodes')
plt.legend(loc = 1)
filename = 'ba_n' + str(n) + '_m' + str(m) + '_dg.png'
plt.savefig(filename)
# plt.show()
plt.clf()
if n >= 1000:
plt.plot(kn, norm_histo, 'r-', label = 'empirical')
        plt.plot(knm, norm_power_law, 'b-', label = 'power law k^(-3)')
plt.title('n = ' + str(n) + ', m = ' + str(m) + ' (log-log)')
        plt.xlabel('Degree k')
        plt.ylabel('Fraction of nodes')
plt.xscale('log')
plt.yscale('log')
plt.legend(loc = 3)
filename = 'ba_n' + str(n) + '_m' + str(m) + '_dg_log_log.png'
plt.savefig(filename)
# plt.show()
plt.clf()
plt.bar(kn, histo, align='center', label = 'empirical')
plt.title('n = ' + str(n) + ', m = ' + str(m) + ' (bar-log-log)')
        plt.xlabel('Degree k')
        plt.ylabel('Number of nodes')
plt.xscale('log')
plt.yscale('log')
plt.legend(loc = 1)
filename = 'ba_n' + str(n) + '_m' + str(m) + '_dg_bar_log_log.png'
plt.savefig(filename)
# plt.show()
plt.clf()
filename = 'ba_n' + str(n) + '_m' + str(m) + '_dg.txt'
file = open(filename, 'w')
for i in range(length):
file.write(str(i) + ' ' + str(histo[i]) + '\n')
file.close()
return
n = [50, 100, 1000, 10000]
m = [1, 2, 4, 10]
for ni in n:
for mi in m:
create_network(ni, mi)
|
gpl-3.0
|
CforED/Machine-Learning
|
sklearn/tests/test_learning_curve.py
|
59
|
10869
|
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score improves (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
|
bsd-3-clause
|
shikhardb/scikit-learn
|
examples/decomposition/plot_ica_blind_source_separation.py
|
349
|
2228
|
"""
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
|
bsd-3-clause
|