repo_name (string, 7-79 chars) | path (string, 4-179 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 959-798k chars) | license (15 classes)
---|---|---|---|---|---
n-west/gnuradio | gr-filter/examples/chirp_channelize.py | 58 | 7169 |
#!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 200000 # number of samples to use
self._fs = 9000 # initial sampling rate
self._M = 9 # Number of channels to channelize
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._fs, 500, 20,
attenuation_dB=10,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
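# (added note) firdes.low_pass_2 arguments above: unity gain, fs = 9 kHz sample
# rate, 500 Hz cutoff, 20 Hz transition width, 10 dB stop-band attenuation,
# Blackman-Harris window; the same prototype filter is shared across the M channels.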
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
repeated = True
if(repeated):
self.vco_input = analog.sig_source_f(self._fs, analog.GR_SIN_WAVE, 0.25, 110)
else:
amp = 100
data = scipy.arange(0, amp, amp/float(self._N))
self.vco_input = blocks.vector_source_f(data, False)
# Build a VCO controlled by either the sinusoid or single chirp tone
# Then convert this to a complex signal
self.vco = blocks.vco_f(self._fs, 225, 1)
self.f2c = blocks.float_to_complex()
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.vco_input, self.vco, self.f2c)
self.connect(self.f2c, self.head, self.pfb)
self.connect(self.f2c, self.snk_i)
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
fig3 = pylab.figure(4, figsize=(16,9), facecolor="w")
Ns = 650
Ne = 20000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs / tb._M
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = freq
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
sp3 = fig3.add_subplot(1,1,1)
p3 = sp3.plot(t_o, x_o.real)
sp3.set_xlim([min(t_o), max(t_o)+1])
sp3.set_ylim([-2, 2])
sp3.set_title("All Channels")
sp3.set_xlabel("Time (s)")
sp3.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
dl1ksv/gnuradio | gr-filter/examples/synth_filter.py | 6 | 1806 |
#!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
for fi in freqs:
s = analog.sig_source_c(fs, analog.GR_SIN_WAVE, fi, 1)
sigs.append(s)
taps = filter.firdes.low_pass_2(len(freqs), fs,
fs/float(nchans)/2, 100, 100)
print("Num. Taps = %d (taps per filter = %d)" % (len(taps),
len(taps) / nchans))
filtbank = filter.pfb_synthesizer_ccf(nchans, taps)
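# (added note) the PFB synthesizer combines its nchans input channels into a
# single output stream at nchans*fs, which is why the PSD below is plotted
# with Fs = nchans*fs.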
head = blocks.head(gr.sizeof_gr_complex, N)
snk = blocks.vector_sink_c()
tb = gr.top_block()
tb.connect(filtbank, head, snk)
for i,si in enumerate(sigs):
tb.connect(si, (filtbank, i))
tb.run()
if 1:
f1 = pyplot.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(snk.data()[1000:])
fftlen = 2048
f2 = pyplot.figure(2)
s2 = f2.add_subplot(1,1,1)
winfunc = numpy.blackman
s2.psd(snk.data()[10000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen / 4,
window = lambda d: d*winfunc(fftlen))
pyplot.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
wkfwkf/statsmodels | statsmodels/sandbox/tsa/examples/ex_mle_garch.py | 31 | 10676 |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 01:01:50 2010
Author: josef-pktd
latest result
-------------
all are very close
garch0 has different parameterization of constant
ordering of parameters is different
seed 2780185
h.shape (2000,)
Optimization terminated successfully.
Current function value: 2093.813397
Iterations: 387
Function evaluations: 676
ggres.params [-0.6146253 0.1914537 0.01039355 0.78802188]
Optimization terminated successfully.
Current function value: 2093.972953
Iterations: 201
Function evaluations: 372
ggres0.params [-0.61537527 0.19635128 4.00706058]
Warning: Desired error not necessarily achieved due to precision loss
Current function value: 2093.972953
Iterations: 51
Function evaluations: 551
Gradient evaluations: 110
ggres0.params [-0.61537855 0.19635265 4.00694669]
Optimization terminated successfully.
Current function value: 2093.751420
Iterations: 103
Function evaluations: 187
[ 0.78671519 0.19692222 0.61457171]
-2093.75141963
Final Estimate:
LLH: 2093.750 norm LLH: 2.093750
omega alpha1 beta1
0.7867438 0.1970437 0.6145467
long run variance comparison
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
R
>>> 0.7867438/(1- 0.1970437- 0.6145467)
4.1757097302897526
Garch (gjr) asymmetric, long-run var ?
>>> 1/(1-0.6146253 - 0.1914537 - 0.01039355) * 0.78802188
4.2937548579245242
>>> 1/(1-0.6146253 - 0.1914537 + 0.01039355) * 0.78802188
3.8569053452140345
Garch0
>>> (1-0.61537855 - 0.19635265) * 4.00694669
0.7543830449902722
>>> errgjr4.var() #for different random seed
4.0924199964716106
todo: add code and verify, check for longer lagpolys
"""
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import numdifftools as ndt
import statsmodels.api as sm
from statsmodels.sandbox import tsa
from statsmodels.sandbox.tsa.garch import * # local import
nobs = 1000
examples = ['garch', 'rpyfit']
if 'garch' in examples:
err,h = generate_kindofgarch(nobs, [1.0, -0.95], [1.0, 0.1], mu=0.5)
plt.figure()
plt.subplot(211)
plt.plot(err)
plt.subplot(212)
plt.plot(h)
#plt.show()
seed = 3842774 #91234 #8837708
seed = np.random.randint(9999999)
print('seed', seed)
np.random.seed(seed)
ar1 = -0.9
err,h = generate_garch(nobs, [1.0, ar1], [1.0, 0.50], mu=0.0,scale=0.1)
# plt.figure()
# plt.subplot(211)
# plt.plot(err)
# plt.subplot(212)
# plt.plot(h)
# plt.figure()
# plt.subplot(211)
# plt.plot(err[-400:])
# plt.subplot(212)
# plt.plot(h[-400:])
#plt.show()
garchplot(err, h)
garchplot(err[-400:], h[-400:])
np.random.seed(seed)
errgjr,hgjr, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.5,0]], mu=0.0,scale=0.1)
garchplot(errgjr[:nobs], hgjr[:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
garchplot(errgjr[-400:nobs], hgjr[-400:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
np.random.seed(seed)
errgjr2,hgjr2, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr2[:nobs], hgjr2[:nobs], 'GJR-GARCH(1,1) Simulation')
garchplot(errgjr2[-400:nobs], hgjr2[-400:nobs], 'GJR-GARCH(1,1) Simulation')
np.random.seed(seed)
errgjr3,hgjr3, etax3 = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9],[0.1,0.9],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr3[:nobs], hgjr3[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr3[-400:nobs], hgjr3[-400:nobs], 'GJR-GARCH(1,3) Simulation')
np.random.seed(seed)
errgjr4,hgjr4, etax4 = generate_gjrgarch(nobs, [1.0, ar1],
[[1., 1,0],[0, 0.1,0.9],[0, 0.1,0.9],[0, 0.1,0.9]],
mu=0.0,scale=0.1)
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
varinno = np.zeros(100)
varinno[0] = 1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -0.],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
garchplot(errgjr5[:20], hgjr5[:20], 'GJR-GARCH(1,3) Simulation')
#garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
#plt.show()
seed = np.random.randint(9999999) # 9188410
print('seed', seed)
x = np.arange(20).reshape(10,2)
x3 = np.column_stack((np.ones((x.shape[0],1)),x))
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,0]]),x3)
nobs = 1000
warmup = 1000
np.random.seed(seed)
ar = [1.0, -0.7]#7, -0.16, -0.1]
#ma = [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]]
ma = [[1., 0, 0],[0, 0.8,0.0]] #,[0, 0.9,0.0]]
# errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, [1.0, -0.99],
# [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]],
# mu=0.2, scale=0.25)
errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, ar, ma,
mu=0.4, scale=1.01)
errgjr4,hgjr4, etax4 = errgjr4[warmup:], hgjr4[warmup:], etax4[warmup:]
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation - DGP')
ggmod = Garch(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print('ggres.params', ggres.params)
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)
print('ggres0.params', ggres0.params)
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, method='bfgs', maxiter=2000)
print('ggres0.params', ggres0.params)
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, errgjr4-errgjr4.mean())[0], [0.93, 0.9, 0.2])
print(g11res)
llf = loglike_GARCH11(g11res, errgjr4-errgjr4.mean())
print(llf[0])
if 'rpyfit' in examples:
from rpy import r
r.library('fGarch')
f = r.formula('~garch(1, 1)')
fit = r.garchFit(f, data = errgjr4-errgjr4.mean(), include_mean=False)
if 'rpysim' in examples:
from rpy import r
f = r.formula('~garch(1, 1)')
#fit = r.garchFit(f, data = errgjr4)
x = r.garchSim( n = 500)
print('R acf', tsa.acf(np.power(x,2))[:15])
arma3 = Arma(np.power(x,2))
arma3res = arma3.fit(start_params=[-0.2,0.1,0.5],maxiter=5000)
print(arma3res.params)
arma3b = Arma(np.power(x,2))
arma3bres = arma3b.fit(start_params=[-0.2,0.1,0.5],maxiter=5000, method='bfgs')
print(arma3bres.params)
xr = r.garchSim( n = 100)
x = np.asarray(xr)
ggmod = Garch(x-x.mean())
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print('ggres.params', ggres.params)
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, x-x.mean())[0], [0.6, 0.6, 0.2])
print(g11res)
llf = loglike_GARCH11(g11res, x-x.mean())
print(llf[0])
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
fit = r.garchFit(f, data = x-x.mean(), include_mean=False, trace=False)
print(r.summary(fit))
'''based on R default simulation
model = list(omega = 1e-06, alpha = 0.1, beta = 0.8)
nobs = 1000
(with nobs=500, gjrgarch doesn't do well)
>>> ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
Optimization terminated successfully.
Current function value: -448.861335
Iterations: 385
Function evaluations: 690
>>> print('ggres.params', ggres.params)
ggres.params [ -7.75090330e-01 1.57714749e-01 -9.60223930e-02 8.76021411e-07]
rearranged
8.76021411e-07 1.57714749e-01(-9.60223930e-02) 7.75090330e-01
>>> print(g11res)
[ 2.97459808e-06 7.83128600e-01 2.41110860e-01]
>>> llf = loglike_GARCH11(g11res, x-x.mean())
>>> print(llf[0])
442.603541936
Log Likelihood:
-448.9376 normalized: -4.489376
omega alpha1 beta1
1.01632e-06 1.02802e-01 7.57537e-01
'''
''' the following is for errgjr4-errgjr4.mean()
ggres.params [-0.54510407 0.22723132 0.06482633 0.82325803]
Final Estimate:
LLH: 2065.56 norm LLH: 2.06556
mu omega alpha1 beta1
0.07229732 0.83069480 0.26313883 0.53986167
ggres.params [-0.50779163 0.2236606 0.00700036 1.154832
Final Estimate:
LLH: 2116.084 norm LLH: 2.116084
mu omega alpha1 beta1
-4.759227e-17 1.145404e+00 2.288348e-01 5.085949e-01
run3
DGP
0.4/?? 0.8 0.7
gjrgarch:
ggres.params [-0.45196579 0.2569641 0.02201904 1.11942636]
rearranged
const/omega ma1/alpha1 ar1/beta1
1.11942636 0.2569641(+0.02201904) 0.45196579
g11:
[ 1.10262688 0.26680468 0.45724957]
-2055.73912687
R:
Final Estimate:
LLH: 2055.738 norm LLH: 2.055738
mu omega alpha1 beta1
-1.665226e-17 1.102396e+00 2.668712e-01 4.573224e-01
fit = r.garchFit(f, data = errgjr4-errgjr4.mean())
rpy.RPy_RException: Error in solve.default(fit$hessian) :
Lapack routine dgesv: system is exactly singular
run4
DGP:
mu=0.4, scale=1.01
ma = [[1., 0, 0],[0, 0.8,0.0]], ar = [1.0, -0.7]
maybe something wrong with simulation
gjrgarch
ggres.params [-0.50554663 0.24449867 -0.00521004 1.00796791]
rearranged
1.00796791 0.24449867(-0.00521004) 0.50554663
garch11:
[ 1.01258264 0.24149155 0.50479994]
-2056.3877404
R include_constant=False
Final Estimate:
LLH: 2056.397 norm LLH: 2.056397
omega alpha1 beta1
1.0123560 0.2409589 0.5049154
'''
erro,ho, etaxo = generate_gjrgarch(20, ar, ma, mu=0.04, scale=0.01,
varinnovation = np.ones(20))
if 'sp500' in examples:
import tabular as tb
import scikits.timeseries as ts
a = tb.loadSV(r'C:\Josef\work-oth\gspc_table.csv')
s = ts.time_series(a[0]['Close'][::-1],
dates=ts.date_array(a[0]['Date'][::-1],freq="D"))
sp500 = a[0]['Close'][::-1]
sp500r = np.diff(np.log(sp500))
#plt.show()
| bsd-3-clause |
smjhnits/Praktikum_TU_D_16-17 | Anfängerpraktikum/Protokolle/V503_Der_Milikan_Versuch/Python/auswertung.py | 1 | 7943 |
import numpy as np
import math
from scipy.stats import sem
from uncertainties import ufloat
import uncertainties.unumpy as unp
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import scipy.constants as const
from uncertainties.unumpy import (nominal_values as noms, std_devs as stds)
from pint import UnitRegistry
u = UnitRegistry()
Q_ = u.Quantity
g = Q_(const.g, 'meter / (second**2)')
e_0 = Q_(const.e, 'coulomb')
dichte_oel = Q_(886, 'kilogram / meter**3')
d_Kondensator = Q_(ufloat(7.6250, 0.0051), 'millimeter')
print(const.e)
## Fit functions
def poly(x, a, b, c):
return a * x**2 + b * x + c
def linear(x, a, b):
return a * x + b
## Measured values
widerstand_gemessen = np.array([1.96, 1.92, 1.87, 1.81, 1.78, 1.71, 1.75, 1.75, 1.75, 1.75, 1.74, 1.74, 1.73, 1.73, 1.73, 1.72, 1.73, 1.72, 1.72, 1.72, 1.71, 1.71, 1.71, 1.71, 1.71, 1.7, 1.7, 1.71, 1.7, 1.7])
t_0 = np.array([17.78, 29.26, 35.03, 15.76, 34, 23.2, 15.41, 6.83, 11.4, 6.61, 9.93, 18, 19.26, 13.78, 9.4, 20.3, 11.56, 6.84, 13.58, 15.49, 8.49, 9.55, 8.13, 15.55, 9.03, 12.13, 16.18, 12.67, 6.95, 14.95])
U_gleichgewicht = Q_(np.array([269, 96, 11, 58, 35, 147, 77, 187, 200, 112, 118, 53, 61, 41, 134, 92, 122, 113, 50, 40, 76, 76, 175, 140, 192, 140, 113, 118, 155, 90]), 'volt')
temp = np.linspace(10, 39, 30)
Widerstand = np.array([3.239, 3.118, 3.004, 2.897, 2.795, 2.7, 2.61, 2.526, 2.446, 2.371, 2.3, 2.233, 2.169, 2.11, 2.053, 2, 1.95, 1.902, 1.857, 1.815, 1.774, 1.736, 1.7, 1.666, 1.634, 1.603, 1.574, 1.547, 1.521, 1.496])
## Temperature
params_temp, covariance_temp = curve_fit(poly, Widerstand, temp)
print('Fitparameter Temperatur: ', 'ax**2 + bx + c', params_temp)
temp_gemessen = poly(widerstand_gemessen, *params_temp)
x_temp = np.linspace(1.65, 2, 1000)
plt.clf()
plt.plot(widerstand_gemessen, temp_gemessen, 'rx', label = r'Temperatur')
plt.plot(x_temp, poly(x_temp, *params_temp), 'b-', label = r'Fit')
plt.xlabel(r'Widerstand in M$\Omega$')
plt.ylabel('Temperatur in C')
plt.xlim(1.67, 1.97)
plt.legend(loc='best')
plt.tight_layout()
plt.savefig('Temp.pdf')
## Viscosity of air
pkt = np.array([[16, 30], [1.805, 1.882]]) # points read off the straight line in Fig. 3
params_visko, covaraince_visko = curve_fit(linear, pkt[0], pkt[1])
visko_gemessen = Q_(linear(temp_gemessen, *params_visko) * 10**(-5), 'newton * second * meters**(-2)')
## Velocity without E-field
v_0 = Q_(0.5/ t_0, 'millimeter / second') ## mm per second
## Radius of the oil droplets ## air density neglected
r_oel = np.sqrt(9 * visko_gemessen * v_0 / (2 * g * dichte_oel))
r_oel = r_oel.to('millimeter')
#print(r_oel)
## corrected charge
p = Q_(1.01325, 'bar') ## pressure during the measurement
B = Q_(6.17 * 10**(-3), 'torr * cm')
korrektur = (1 + B / (p * r_oel))**(-3/2) ## dimensionless
## corrected radius ###
r_korrigiert = (np.sqrt((B / (2 * p))**2 + 9 * visko_gemessen * v_0 / (2 * g * dichte_oel)) - B / (2 * p))
r_oel = r_korrigiert.to('millimeter')
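## (added note) r_korrigiert appears to apply the Cunningham slip correction to the
## Stokes radius: it solves r'**2 + (B/p) * r' = 9 * eta * v_0 / (2 * g * rho_oil)
## for the corrected radius r', with B the Millikan correction constant and p the air pressure.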
E_feld = U_gleichgewicht / d_Kondensator
## determine the charge from the force balance
q = 4 * np.pi / 3 * dichte_oel * r_korrigiert**3 * g * 1 / E_feld
q = q.to('coulomb')
q_korrigiert = q #* korrektur
## charges compared with the elementary charge
plt.clf()
ax1 = plt.subplot(2, 1, 2)
plt.plot(range(1, len(noms(q_korrigiert.magnitude)) + 1), np.sort(noms(q_korrigiert.magnitude)), 'kx', label = r'Messdaten')
#plt.axhline(y = e_0.magnitude, color = 'r', linewidth = 2, label = 'Elementarladung')
plt.yticks([e_0.magnitude, 2 * e_0.magnitude, 3 * e_0.magnitude, 4 * e_0.magnitude, 5 * e_0.magnitude, 6 * e_0.magnitude, 7 * e_0.magnitude, 8 * e_0.magnitude, 9 * e_0.magnitude],
[r"$e_0$", r"$2e_0$", r"$3e_0$" ,r"$4e_0$", r"$5e_0$", r"$6e_0$", r"$7e_0$", r"$8e_0$", r"$9e_0$"])
plt.grid()
plt.xlabel('Messreihe')
plt.ylabel('Elementaladungen')
plt.legend(loc='best')
plt.tight_layout()
ax2 = plt.subplot(2, 1, 1, sharex = ax1)
plt.plot(range(1, len(noms(q_korrigiert.magnitude)) + 1), noms(q_korrigiert.magnitude), 'kx', label = r'Messdaten')
#plt.axhline(y = e_0.magnitude, color = 'r', linewidth = 2, label = 'Elementarladung')
plt.yticks([e_0.magnitude, 2 * e_0.magnitude, 3 * e_0.magnitude, 4 * e_0.magnitude, 5 * e_0.magnitude, 6 * e_0.magnitude, 7 * e_0.magnitude, 8 * e_0.magnitude, 9 * e_0.magnitude],
[r"$e_0$", r"$2e_0$", r"$3e_0$" ,r"$4e_0$", r"$5e_0$", r"$6e_0$", r"$7e_0$", r"$8e_0$", r"$9e_0$"])
plt.grid()
plt.ylabel('Elementaladungen')
plt.xlim(-1, 31)
plt.legend(loc='best')
plt.tight_layout()
#plt.show()
plt.savefig('Ladungen_E_0.pdf')
## Determination of our own value for the elementary charge
## determine the spacings between the charges; spacings that are too small
## are dropped, since they result from measurement uncertainty
## sort the measured data
q_sortiert = Q_(np.sort(q_korrigiert), 'coulomb')
q_sort = noms(q_sortiert.magnitude)
# from the plot
q_mittel = np.array([np.mean(q_sort[0:2]), np.mean(q_sort[3:7]), np.mean(q_sort[8:14]), np.mean(q_sort[15:19]), q_sort[20], np.mean(q_sort[21:24]), np.mean(q_sort[25:26]), q_sort[29], np.mean(q_sort[28:29])])
#print(np.argmin(abs(q_sortiert.magnitude / e_0.magnitude - 1)))
n = q_sortiert[0]
q_sortiert[0] = q_sortiert[2]
q_sortiert[2] = n
#def round_dist(q, q_test):
# test = np.zeros(len(q))
# for i in range(len(q) - 1):
# test[i] = np.round(q[i] / q_test)
# return sum(test)
#for i in range(len(noms(q_sortiert.magnitude)) - 1):
# if np.abs(q_sortiert[i + 1] - q_sortiert[i]) < e_0/2:
# q_sortiert[i + 1] = q_sortiert[i]
#best_value = np.zeros(len(noms(q_sortiert.magnitude)))
dist = 10**-19
e = const.e
q_test = np.linspace(e - dist, e + dist, len(q_sort)) ## array of test charges
best_value = q_mittel
#for i in range(len(q_sort) - 1): ## pick out the best measured values
# if np.abs(q_sort[i + 1] - q_sort[i]) > e_0.magnitude /2:
# best_value = np.append(best_value, q_sort[i])
q_best = np.array([])
for i in range(len(best_value)): ## the charge with the smallest distance is the candidate for the elementary charge
q_best = np.append(q_best, np.abs(np.around(best_value[i] / q_test) - best_value[i] / q_test))
#if q_best = 0.000916822140607
#print(i, q_best[i], best_value[i])
print(len(q_best), len(best_value), len(q_test))
print(best_value)
print(q_best[np.argmin(q_best)], np.argmin(q_best), np.abs(np.around(best_value[1] / q_test[13]) - best_value[1] / q_test[13]))
print(q_test[13], best_value[1])
print('bester wert für e_0:', q_test[13])
print('absoluter Fehler: ', q_test[13] - const.e)
print('relativer Fehler: ', np.abs(q_test[13] - const.e) / const.e)
print(q_test[13] / e)
#for i in range(len(noms(q_sortiert.magnitude))):
# best_value[i] = round_dist(q_sortiert, q_sortiert[i])
#
#print(np.argmin(best_value))
### Avogadro constant
## via the Faraday constant
N_a = 96485.33289 / q_test[13]
print('Avogadro: ', N_a, const.N_A)
print('absoluter Fehler: ', const.N_A - N_a)
print('relativer Fehler: ', np.abs(N_a - const.N_A) / const.N_A)
print(N_a / const.N_A - 1)
plt.clf()
plt.plot(range(0, len(noms(q_sortiert.magnitude))), noms(q_sortiert.magnitude), 'kx', label = r'Messdaten')
#plt.axhline(y = e_0.magnitude, color = 'r', linewidth = 2, label = 'Elementarladung')
plt.yticks([e_0.magnitude, 2 * e_0.magnitude, 3 * e_0.magnitude, 4 * e_0.magnitude, 5 * e_0.magnitude, 6 * e_0.magnitude, 7 * e_0.magnitude, 8 * e_0.magnitude, 9 * e_0.magnitude],
[r"$e_0$", r"$2e_0$", r"$3e_0$" ,r"$4e_0$", r"$5e_0$", r"$6e_0$", r"$7e_0$", r"$8e_0$", r"$9e_0$"])
plt.grid()
plt.xlabel('Nummer der Messung')
plt.ylabel('Elementaladungen')
plt.xlim(-1, 31)
plt.legend(loc='best')
plt.tight_layout()
#plt.show()
#plt.savefig('sortierte_Ladungen_E_0.pdf')
## export the measured data
np.savetxt('Messdaten.txt', np.column_stack([widerstand_gemessen, t_0, U_gleichgewicht.magnitude, noms(r_oel.magnitude), stds(r_oel.magnitude), noms(q_korrigiert.magnitude), stds(q_korrigiert.magnitude)]), header = "widerstand t_0 U_g r_oel err q err")
| mit |
aditiiyer/CERR | CERR_core/ModelImplementationLibrary/SegmentationModels/ModelDependencies/CT_HeartSubStructures_DeepLab/dataloaders/utils.py | 4 | 3279 |
import matplotlib.pyplot as plt
import numpy as np
import torch
def decode_seg_map_sequence(label_masks, dataset='heart'):
rgb_masks = []
for label_mask in label_masks:
rgb_mask = decode_segmap(label_mask, dataset)
rgb_masks.append(rgb_mask)
rgb_masks = torch.from_numpy(np.array(rgb_masks).transpose([0, 3, 1, 2]))
return rgb_masks
def decode_segmap(label_mask, dataset, plot=False):
"""Decode segmentation class labels into a color image
Args:
label_mask (np.ndarray): an (M,N) array of integer values denoting
the class label at each spatial location.
plot (bool, optional): whether to show the resulting color image
in a figure.
Returns:
(np.ndarray, optional): the resulting decoded color image.
"""
if dataset == 'heart':
n_classes = 10
label_colours = get_heart_labels()
elif dataset == 'validation':
n_classes = 10
label_colours = get_heart_struct_labels()
elif dataset == 'heart_struct' or dataset == 'heart_peri' or dataset == 'heart_ventricles' or dataset == 'heart_atria':
n_classes = 2
label_colours = get_heart_labels()
elif dataset == 'validation_struct' or dataset == 'validation_peri' or dataset == 'validation_ventricles' or dataset == 'validation_atria':
n_classes = 2
label_colours = get_heart_struct_labels()
else:
raise NotImplementedError
r = label_mask.copy()
g = label_mask.copy()
b = label_mask.copy()
for ll in range(0, n_classes):
r[label_mask == ll] = label_colours[ll, 0]
g[label_mask == ll] = label_colours[ll, 1]
b[label_mask == ll] = label_colours[ll, 2]
rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
if plot:
plt.imshow(rgb)
plt.show()
else:
return rgb
def encode_segmap(mask):
"""Encode segmentation label images as pascal classes
Args:
mask (np.ndarray): raw segmentation label image of dimension
(M, N, 3), in which the Pascal classes are encoded as colours.
Returns:
(np.ndarray): class map with dimensions (M,N), where the value at
a given location is the integer denoting the class index.
"""
mask = mask.astype(int)
label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
for ii, label in enumerate(get_heart_labels()):
label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii
label_mask = label_mask.astype(int)
return label_mask
def get_heart_labels():
# return np.array with dimensions (10,3)
# [0,1,2,3,4,5,6,7,8,9]
#['unlabelled', HEART', 'AORTA', 'LA', 'LV', 'RA', 'RV', 'IVC', 'SVC', 'PA']
return np.asarray([[0, 0, 0],
[128, 0, 0], [0, 128, 0], [128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
[64, 0, 0], [192, 0, 0], [64, 128, 0]])
def get_heart_struct_labels():
# return np.array with dimensions (2,3)
# [0,1]
#['unlabelled', HEART']
return np.asarray([[0, 0, 0],
[128, 0, 0]])
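# Minimal usage sketch (added; values are illustrative and not part of the original
# module): decode a random integer label mask with the 'heart' colour map.
if __name__ == "__main__":
    demo_mask = np.random.randint(0, 10, size=(8, 8))
    demo_rgb = decode_segmap(demo_mask, dataset='heart', plot=False)
    print(demo_rgb.shape)  # expected: (8, 8, 3)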
| lgpl-2.1 |
fredhusser/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 |
"""Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
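# (added note) the raw unicode strings can be passed directly to clf.predict()
# because the TfidfVectorizer inside the pipeline performs the character
# n-gram extraction itself.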
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
YerevaNN/mimic3-benchmarks | mimic3benchmark/scripts/create_decompensation.py | 1 | 4620 |
from __future__ import absolute_import
from __future__ import print_function
import os
import argparse
import numpy as np
import pandas as pd
from datetime import datetime
import random
random.seed(49297)
from tqdm import tqdm
def process_partition(args, partition, sample_rate=1.0, shortest_length=4.0,
eps=1e-6, future_time_interval=24.0):
output_dir = os.path.join(args.output_path, partition)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
xty_triples = []
patients = list(filter(str.isdigit, os.listdir(os.path.join(args.root_path, partition))))
for patient in tqdm(patients, desc='Iterating over patients in {}'.format(partition)):
patient_folder = os.path.join(args.root_path, partition, patient)
patient_ts_files = list(filter(lambda x: x.find("timeseries") != -1, os.listdir(patient_folder)))
stays_df = pd.read_csv(os.path.join(patient_folder, "stays.csv"))
for ts_filename in patient_ts_files:
with open(os.path.join(patient_folder, ts_filename)) as tsfile:
lb_filename = ts_filename.replace("_timeseries", "")
label_df = pd.read_csv(os.path.join(patient_folder, lb_filename))
# empty label file
if label_df.shape[0] == 0:
continue
mortality = int(label_df.iloc[0]["Mortality"])
los = 24.0 * label_df.iloc[0]['Length of Stay'] # in hours
if pd.isnull(los):
print("(length of stay is missing)", patient, ts_filename)
continue
stay = stays_df[stays_df.ICUSTAY_ID == label_df.iloc[0]['Icustay']]
deathtime = stay['DEATHTIME'].iloc[0]
intime = stay['INTIME'].iloc[0]
if pd.isnull(deathtime):
lived_time = 1e18
else:
lived_time = (datetime.strptime(deathtime, "%Y-%m-%d %H:%M:%S") -
datetime.strptime(intime, "%Y-%m-%d %H:%M:%S")).total_seconds() / 3600.0
ts_lines = tsfile.readlines()
header = ts_lines[0]
ts_lines = ts_lines[1:]
event_times = [float(line.split(',')[0]) for line in ts_lines]
ts_lines = [line for (line, t) in zip(ts_lines, event_times)
if -eps < t < los + eps]
event_times = [t for t in event_times
if -eps < t < los + eps]
# no measurements in ICU
if len(ts_lines) == 0:
print("(no events in ICU) ", patient, ts_filename)
continue
sample_times = np.arange(0.0, min(los, lived_time) + eps, sample_rate)
sample_times = list(filter(lambda x: x > shortest_length, sample_times))
# At least one measurement
sample_times = list(filter(lambda x: x > event_times[0], sample_times))
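# (added note) sample_times are the candidate prediction times: one every
# sample_rate hours from t=0 up to min(length of stay, time actually lived),
# keeping only times after shortest_length hours and after the first
# recorded measurement.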
output_ts_filename = patient + "_" + ts_filename
with open(os.path.join(output_dir, output_ts_filename), "w") as outfile:
outfile.write(header)
for line in ts_lines:
outfile.write(line)
for t in sample_times:
if mortality == 0:
cur_mortality = 0
else:
cur_mortality = int(lived_time - t < future_time_interval)
xty_triples.append((output_ts_filename, t, cur_mortality))
print("Number of created samples:", len(xty_triples))
if partition == "train":
random.shuffle(xty_triples)
if partition == "test":
xty_triples = sorted(xty_triples)
with open(os.path.join(output_dir, "listfile.csv"), "w") as listfile:
listfile.write('stay,period_length,y_true\n')
for (x, t, y) in xty_triples:
listfile.write('{},{:.6f},{:d}\n'.format(x, t, y))
def main():
parser = argparse.ArgumentParser(description="Create data for decompensation prediction task.")
parser.add_argument('root_path', type=str, help="Path to root folder containing train and test sets.")
parser.add_argument('output_path', type=str, help="Directory where the created data should be stored.")
args, _ = parser.parse_known_args()
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
process_partition(args, "test")
process_partition(args, "train")
if __name__ == '__main__':
main()
| mit |
btabibian/scikit-learn | examples/svm/plot_svm_regression.py | 120 | 1520 |
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
# plt.hold('on') is obsolete: plt.hold was deprecated and later removed from matplotlib, and overlaying plots is now the default behaviour
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
amolkahat/pandas | pandas/tests/io/test_parquet.py | 1 | 18660 |
""" test parquet compat """
import pytest
import datetime
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.compat import PY3, is_platform_windows, is_platform_mac
from pandas.io.parquet import (to_parquet, read_parquet, get_engine,
PyArrowImpl, FastParquetImpl)
from pandas.util import testing as tm
try:
import pyarrow # noqa
_HAVE_PYARROW = True
except ImportError:
_HAVE_PYARROW = False
try:
import fastparquet # noqa
_HAVE_FASTPARQUET = True
except ImportError:
_HAVE_FASTPARQUET = False
# setup engines & skips
@pytest.fixture(params=[
pytest.param('fastparquet',
marks=pytest.mark.skipif(not _HAVE_FASTPARQUET,
reason='fastparquet is '
'not installed')),
pytest.param('pyarrow',
marks=pytest.mark.skipif(not _HAVE_PYARROW,
reason='pyarrow is '
'not installed'))])
def engine(request):
return request.param
@pytest.fixture
def pa():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
return 'pyarrow'
@pytest.fixture
def pa_lt_070():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
if LooseVersion(pyarrow.__version__) >= LooseVersion('0.7.0'):
pytest.skip("pyarrow is >= 0.7.0")
return 'pyarrow'
@pytest.fixture
def pa_ge_070():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
if LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'):
pytest.skip("pyarrow is < 0.7.0")
return 'pyarrow'
@pytest.fixture
def fp():
if not _HAVE_FASTPARQUET:
pytest.skip("fastparquet is not installed")
return 'fastparquet'
@pytest.fixture
def fp_lt_014():
if not _HAVE_FASTPARQUET:
pytest.skip("fastparquet is not installed")
if LooseVersion(fastparquet.__version__) >= LooseVersion('0.1.4'):
pytest.skip("fastparquet is >= 0.1.4")
return 'fastparquet'
@pytest.fixture
def df_compat():
return pd.DataFrame({'A': [1, 2, 3], 'B': 'foo'})
@pytest.fixture
def df_cross_compat():
df = pd.DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
# 'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('20130101', periods=3),
# 'g': pd.date_range('20130101', periods=3,
# tz='US/Eastern'),
# 'h': pd.date_range('20130101', periods=3, freq='ns')
})
return df
@pytest.fixture
def df_full():
return pd.DataFrame(
{'string': list('abc'),
'string_with_nan': ['a', np.nan, 'c'],
'string_with_none': ['a', None, 'c'],
'bytes': [b'foo', b'bar', b'baz'],
'unicode': [u'foo', u'bar', u'baz'],
'int': list(range(1, 4)),
'uint': np.arange(3, 6).astype('u1'),
'float': np.arange(4.0, 7.0, dtype='float64'),
'float_with_nan': [2., np.nan, 3.],
'bool': [True, False, True],
'datetime': pd.date_range('20130101', periods=3),
'datetime_with_nat': [pd.Timestamp('20130101'),
pd.NaT,
pd.Timestamp('20130103')]})
def check_round_trip(df, engine=None, path=None,
write_kwargs=None, read_kwargs=None,
expected=None, check_names=True,
repeat=2):
"""Verify parquet serializer and deserializer produce the same results.
Performs a pandas to disk and disk to pandas round trip,
then compares the 2 resulting DataFrames to verify equality.
Parameters
----------
df: Dataframe
engine: str, optional
'pyarrow' or 'fastparquet'
path: str, optional
write_kwargs: dict of str:str, optional
read_kwargs: dict of str:str, optional
expected: DataFrame, optional
Expected deserialization result, otherwise will be equal to `df`
check_names: bool, optional
Whether to also compare the column/index names (passed to assert_frame_equal)
repeat: int, optional
How many times to repeat the test
"""
write_kwargs = write_kwargs or {'compression': None}
read_kwargs = read_kwargs or {}
if expected is None:
expected = df
if engine:
write_kwargs['engine'] = engine
read_kwargs['engine'] = engine
def compare(repeat):
for _ in range(repeat):
df.to_parquet(path, **write_kwargs)
with catch_warnings(record=True):
actual = read_parquet(path, **read_kwargs)
tm.assert_frame_equal(expected, actual,
check_names=check_names)
if path is None:
with tm.ensure_clean() as path:
compare(repeat)
else:
compare(repeat)
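# Illustrative direct use of check_round_trip (added comment, assuming the
# pyarrow engine is available):
#   check_round_trip(pd.DataFrame({'A': [1, 2, 3]}), engine='pyarrow')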
def test_invalid_engine(df_compat):
with pytest.raises(ValueError):
check_round_trip(df_compat, 'foo', 'bar')
def test_options_py(df_compat, pa):
# use the set option
with pd.option_context('io.parquet.engine', 'pyarrow'):
check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
# use the set option
with pd.option_context('io.parquet.engine', 'fastparquet'):
check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
# use the set option
with pd.option_context('io.parquet.engine', 'auto'):
check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
assert isinstance(get_engine('pyarrow'), PyArrowImpl)
assert isinstance(get_engine('fastparquet'), FastParquetImpl)
with pd.option_context('io.parquet.engine', 'pyarrow'):
assert isinstance(get_engine('auto'), PyArrowImpl)
assert isinstance(get_engine('pyarrow'), PyArrowImpl)
assert isinstance(get_engine('fastparquet'), FastParquetImpl)
with pd.option_context('io.parquet.engine', 'fastparquet'):
assert isinstance(get_engine('auto'), FastParquetImpl)
assert isinstance(get_engine('pyarrow'), PyArrowImpl)
assert isinstance(get_engine('fastparquet'), FastParquetImpl)
with pd.option_context('io.parquet.engine', 'auto'):
assert isinstance(get_engine('auto'), PyArrowImpl)
assert isinstance(get_engine('pyarrow'), PyArrowImpl)
assert isinstance(get_engine('fastparquet'), FastParquetImpl)
@pytest.mark.xfail(is_platform_windows() or is_platform_mac(),
reason="reading pa metadata failing on Windows/mac",
strict=True)
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=pa, compression=None)
result = read_parquet(path, engine=fp)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=fp, columns=['a', 'd'])
tm.assert_frame_equal(result, df[['a', 'd']])
def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
with catch_warnings(record=True):
result = read_parquet(path, engine=pa)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=pa, columns=['a', 'd'])
tm.assert_frame_equal(result, df[['a', 'd']])
class Base(object):
def check_error_on_write(self, df, engine, exc):
# check that we are raising the exception on writing
with tm.ensure_clean() as path:
with pytest.raises(exc):
to_parquet(df, path, engine, compression=None)
class TestBasic(Base):
def test_error(self, engine):
for obj in [pd.Series([1, 2, 3]), 1, 'foo', pd.Timestamp('20130101'),
np.array([1, 2, 3])]:
self.check_error_on_write(obj, engine, ValueError)
def test_columns_dtypes(self, engine):
df = pd.DataFrame({'string': list('abc'),
'int': list(range(1, 4))})
# unicode
df.columns = [u'foo', u'bar']
check_round_trip(df, engine)
def test_columns_dtypes_invalid(self, engine):
df = pd.DataFrame({'string': list('abc'),
'int': list(range(1, 4))})
# numeric
df.columns = [0, 1]
self.check_error_on_write(df, engine, ValueError)
if PY3:
# bytes on PY3, on PY2 these are str
df.columns = [b'foo', b'bar']
self.check_error_on_write(df, engine, ValueError)
# python object
df.columns = [datetime.datetime(2011, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 1, 1)]
self.check_error_on_write(df, engine, ValueError)
@pytest.mark.parametrize('compression', [None, 'gzip', 'snappy', 'brotli'])
def test_compression(self, engine, compression):
if compression == 'snappy':
pytest.importorskip('snappy')
elif compression == 'brotli':
pytest.importorskip('brotli')
df = pd.DataFrame({'A': [1, 2, 3]})
check_round_trip(df, engine, write_kwargs={'compression': compression})
def test_read_columns(self, engine):
# GH18154
df = pd.DataFrame({'string': list('abc'),
'int': list(range(1, 4))})
expected = pd.DataFrame({'string': list('abc')})
check_round_trip(df, engine, expected=expected,
read_kwargs={'columns': ['string']})
def test_write_index(self, engine):
check_names = engine != 'fastparquet'
if engine == 'pyarrow':
import pyarrow
if LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'):
pytest.skip("pyarrow is < 0.7.0")
df = pd.DataFrame({'A': [1, 2, 3]})
check_round_trip(df, engine)
indexes = [
[2, 3, 4],
pd.date_range('20130101', periods=3),
list('abc'),
[1, 3, 4],
]
# non-default index
for index in indexes:
df.index = index
check_round_trip(df, engine, check_names=check_names)
# index with meta-data
df.index = [0, 1, 2]
df.index.name = 'foo'
check_round_trip(df, engine)
def test_write_multiindex(self, pa_ge_070):
# Not supported in fastparquet as of 0.1.3 or with older pyarrow versions
engine = pa_ge_070
df = pd.DataFrame({'A': [1, 2, 3]})
index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
df.index = index
check_round_trip(df, engine)
def test_write_column_multiindex(self, engine):
# column multi-index
mi_columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns)
self.check_error_on_write(df, engine, ValueError)
def test_multiindex_with_columns(self, pa_ge_070):
engine = pa_ge_070
dates = pd.date_range('01-Jan-2018', '01-Dec-2018', freq='MS')
df = pd.DataFrame(np.random.randn(2 * len(dates), 3),
columns=list('ABC'))
index1 = pd.MultiIndex.from_product(
[['Level1', 'Level2'], dates],
names=['level', 'date'])
index2 = index1.copy(names=None)
for index in [index1, index2]:
df.index = index
check_round_trip(df, engine)
check_round_trip(df, engine, read_kwargs={'columns': ['A', 'B']},
expected=df[['A', 'B']])
def test_write_ignoring_index(self, engine):
# ENH 20768
# Ensure index=False omits the index from the written Parquet file.
df = pd.DataFrame({'a': [1, 2, 3], 'b': ['q', 'r', 's']})
write_kwargs = {
'compression': None,
'index': False,
}
# Because we're dropping the index, we expect the loaded dataframe to
# have the default integer index.
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs,
expected=expected)
# Ignore custom index
df = pd.DataFrame({'a': [1, 2, 3], 'b': ['q', 'r', 's']},
index=['zyx', 'wvu', 'tsr'])
check_round_trip(df, engine, write_kwargs=write_kwargs,
expected=expected)
# Ignore multi-indexes as well.
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = pd.DataFrame({'one': [i for i in range(8)],
'two': [-i for i in range(8)]}, index=arrays)
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs,
expected=expected)
class TestParquetPyArrow(Base):
def test_basic(self, pa, df_full):
df = df_full
# additional supported types for pyarrow
import pyarrow
if LooseVersion(pyarrow.__version__) >= LooseVersion('0.7.0'):
df['datetime_tz'] = pd.date_range('20130101', periods=3,
tz='Europe/Brussels')
df['bool_with_none'] = [True, None, True]
check_round_trip(df, pa)
# TODO: This doesn't fail on all systems; track down which
@pytest.mark.xfail(reason="pyarrow fails on this (ARROW-1883)")
def test_basic_subset_columns(self, pa, df_full):
# GH18628
df = df_full
# additional supported types for pyarrow
df['datetime_tz'] = pd.date_range('20130101', periods=3,
tz='Europe/Brussels')
check_round_trip(df, pa, expected=df[['string', 'int']],
read_kwargs={'columns': ['string', 'int']})
def test_duplicate_columns(self, pa):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3),
columns=list('aaa')).copy()
self.check_error_on_write(df, pa, ValueError)
@pytest.mark.xfail(reason="failing for pyarrow < 0.11.0")
def test_unsupported(self, pa):
# period
df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
# pyarrow 0.11 raises ArrowTypeError
# older pyarrows raise ArrowInvalid
self.check_error_on_write(df, pa, Exception)
# timedelta
df = pd.DataFrame({'a': pd.timedelta_range('1 day',
periods=3)})
self.check_error_on_write(df, pa, NotImplementedError)
# mixed python objects
df = pd.DataFrame({'a': ['a', 1, 2.0]})
# pyarrow 0.11 raises ArrowTypeError
# older pyarrows raise ArrowInvalid
self.check_error_on_write(df, pa, Exception)
def test_categorical(self, pa_ge_070):
pa = pa_ge_070
# supported in >= 0.7.0
df = pd.DataFrame({'a': pd.Categorical(list('abc'))})
# de-serialized as object
expected = df.assign(a=df.a.astype(object))
check_round_trip(df, pa, expected=expected)
def test_categorical_unsupported(self, pa_lt_070):
pa = pa_lt_070
# supported in >= 0.7.0
df = pd.DataFrame({'a': pd.Categorical(list('abc'))})
self.check_error_on_write(df, pa, NotImplementedError)
def test_s3_roundtrip(self, df_compat, s3_resource, pa):
# GH #19134
check_round_trip(df_compat, pa,
path='s3://pandas-test/pyarrow.parquet')
class TestParquetFastParquet(Base):
def test_basic(self, fp, df_full):
df = df_full
# additional supported types for fastparquet
if LooseVersion(fastparquet.__version__) >= LooseVersion('0.1.4'):
df['datetime_tz'] = pd.date_range('20130101', periods=3,
tz='US/Eastern')
df['timedelta'] = pd.timedelta_range('1 day', periods=3)
check_round_trip(df, fp)
@pytest.mark.skip(reason="not supported")
def test_duplicate_columns(self, fp):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3),
columns=list('aaa')).copy()
self.check_error_on_write(df, fp, ValueError)
def test_bool_with_none(self, fp):
df = pd.DataFrame({'a': [True, None, False]})
expected = pd.DataFrame({'a': [1.0, np.nan, 0.0]}, dtype='float16')
check_round_trip(df, fp, expected=expected)
def test_unsupported(self, fp):
# period
df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
self.check_error_on_write(df, fp, ValueError)
# mixed
df = pd.DataFrame({'a': ['a', 1, 2.0]})
self.check_error_on_write(df, fp, ValueError)
def test_categorical(self, fp):
if LooseVersion(fastparquet.__version__) < LooseVersion("0.1.3"):
pytest.skip("CategoricalDtype not supported for older fp")
df = pd.DataFrame({'a': pd.Categorical(list('abc'))})
check_round_trip(df, fp)
def test_datetime_tz(self, fp_lt_014):
# fastparquet<0.1.4 doesn't preserve tz
df = pd.DataFrame({'a': pd.date_range('20130101', periods=3,
tz='US/Eastern')})
# warns on the coercion
with catch_warnings(record=True):
check_round_trip(df, fp_lt_014,
expected=df.astype('datetime64[ns]'))
def test_filter_row_groups(self, fp):
d = {'a': list(range(0, 3))}
df = pd.DataFrame(d)
with tm.ensure_clean() as path:
df.to_parquet(path, fp, compression=None,
row_group_offsets=1)
result = read_parquet(path, fp, filters=[('a', '==', 0)])
assert len(result) == 1
def test_s3_roundtrip(self, df_compat, s3_resource, fp):
# GH #19134
check_round_trip(df_compat, fp,
path='s3://pandas-test/fastparquet.parquet')
| bsd-3-clause |
Srisai85/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 |
"""
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
ishanic/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 |
"""
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features // 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
hfegetude/EjerciciosMicroondas
|
tema3/ej11/parte1.py
|
1
|
6194
|
import numpy as np
import matplotlib.pyplot as plt
def cart2pol(x, y):
theta = np.arctan2(y, x)
rho = np.hypot(x, y)
return theta, rho
def pol2cart(theta, rho):
x = rho * np.cos(theta)
y = rho * np.sin(theta)
return x, y
def add_radius(x, y, r ):
ang, mod = cart2pol(x, y)
return pol2cart( ang, mod + r)
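# Quick check (added, illustrative only): add_radius(1, 0, 0.5) converts
# (1, 0) to polar (theta=0, rho=1), extends the radius by 0.5 and converts
# back, giving (1.5, 0.0).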
def adjustFigAspect(fig,aspect=1):
'''
Adjust the subplot parameters so that the figure has the correct
aspect ratio.
'''
xsize,ysize = fig.get_size_inches()
minsize = min(xsize,ysize)
xlim = .4*minsize/xsize
ylim = .4*minsize/ysize
if aspect < 1:
xlim *= aspect
else:
ylim /= aspect
fig.subplots_adjust(left=.5-xlim,
right=.5+xlim,
bottom=.5-ylim,
top=.5+ylim)
def colision(r0, r1, p1x, p1y, p2x, p2y):
d=np.linalg.norm([p1x-p2x , p1y-p2y] )
a=(r0*r0 - r1*r1 + d*d)/(2*d)
h = np.sqrt(r0*r0 - a*a)
p3x = p1x + a*(p2x - p1x)/(d)
p3y = p1y + a*(p2y - p1y)/d
p4x = p3x - h*(p2y - p1y)/d
p4y = p3y + h*(p2x - p1x)/d
return p4x, p4y
def colisionM(r0, r1, p1x, p1y, p2x, p2y):
d=np.linalg.norm([p1x-p2x , p1y-p2y] )
a=(r0*r0 - r1*r1 + d*d)/(2*d)
h = np.sqrt(r0*r0 - a*a)
p3x = p1x + a*(p2x - p1x)/(d)
p3y = p1y + a*(p2y - p1y)/d
p4x = p3x + h*(p2y - p1y)/d
p4y = p3y - h*(p2x - p1x)/d
return p4x, p4y
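# Note (added commentary): colision() and colisionM() implement the standard
# two-circle intersection formulas. For centres p1, p2 a distance d apart with
# radii r0, r1, the chord midpoint lies a = (r0^2 - r1^2 + d^2) / (2*d) along
# the centre line and h = sqrt(r0^2 - a^2) perpendicular to it; the two
# functions return the two opposite intersection points (+h and -h offsets).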
def line(i):
x = 1 + (1/i) * np.cos(np.arange(0 , 2*np.pi , 0.0001))
y = (1/(i))+(1/(i)) * np.sin(np.arange(0 , 2*np.pi , 0.0001))
x_t , y_t = colision(1, 1/i, 0, 0, 1, 1/i)
x_f = x[x < 1]
y_f = y[x < 1]
y_f = y_f[x_f > -1]
x_f = x_f[x_f > -1]
x_f = x_f[y_f < y_t ]
y_f = y_f[y_f < y_t ]
ax.plot(x_f, y_f , 'k', linewidth = 0.2)
x_text , y_text = add_radius(x_t, y_t, 0.01)
ax.text( x_text,
y_text,
str(i),
verticalalignment='center',
horizontalalignment='center',
rotation=np.angle(x_t + y_t*1j, deg=True) - 90 ,
fontsize=3)
##ax.plot(x_text, y_text, 'ko')
def line2(i):
x = 1 + (1/(-1*i)) * np.cos(np.arange( -np.pi , np.pi, 0.0001))
y = (1/(i*-1))+(1/(i*-1)) * np.sin(np.arange(-np.pi , np.pi, 0.0001))
x_t , y_t = colisionM(1, 1/i, 0, 0, 1, -1/i)
x_f = x[x < 1]
y_f = y[x < 1]
y_f = y_f[x_f > -1]
x_f = x_f[x_f > -1]
x_f = x_f[y_f > y_t ]
y_f = y_f[y_f > y_t ]
x_text , y_text = add_radius(x_t, y_t, 0.02)
ax.text( x_text,
y_text,
str(i),
verticalalignment='center',
horizontalalignment='center',
rotation=np.angle(x_t + y_t*1j, deg=True) - 90 ,
fontsize=3)
#ax.plot(x_t, y_t, 'ko')
ax.plot( x_f[20:] ,y_f[20:] , 'k', linewidth = 0.2)
def paint_line(i, ax):
x = i/(1+i) + (1/(1+i)) * np.cos(np.arange(0 , 2*np.pi , 0.001))
y = (1/(1+i)) * np.sin(np.arange(0 , 2*np.pi , 0.001))
ax.plot(x, y, 'k', linewidth = 0.2)
ax.text( 1-2*(1/(1+i)),
0.02,
str(i),
verticalalignment='bottom',
horizontalalignment='right',
rotation=90,
fontsize=3)
line(i)
line2(i)
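# Note (added commentary): these helpers draw the usual Smith-chart grid.
# paint_line(r, ax) plots the constant-resistance circle centred at
# (r/(1+r), 0) with radius 1/(1+r); line(x) and line2(x) plot the
# constant-reactance arcs, circles of radius 1/x centred at (1, 1/x) and
# (1, -1/x), clipped to the unit circle.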
def paint_text_degrees():
positions = np.arange(0, np.pi*2, 2*np.pi / 36)
for i, ang in enumerate(positions):
x_t , y_t = pol2cart(ang, 1.04)
ax.text( x_t,
y_t,
str(i*10),
verticalalignment='center',
horizontalalignment='center',
rotation=np.angle(x_t + y_t*1j, deg=True) - 90 ,
fontsize=3)
def paint_text_wavelength():
positions = np.arange(np.pi, 3*np.pi, 2*np.pi / 50)
for i, ang in enumerate(positions):
x_t , y_t = pol2cart(ang, 1.06)
ax.text( x_t,
y_t,
str(i/100),
verticalalignment='center',
horizontalalignment='center',
rotation=np.angle(x_t + y_t*1j, deg=True) - 90 ,
fontsize=3)
def imp2point(v1, v2):
reax = v1/(1+v1)
reay = 0
rear = (1/(1+v1))
imgx = 1
imgy = 1/v2
imgr = 1/v2
return colision(rear, imgr, reax, reay, imgx, imgy)
def move_wl(x, y , wl):
ax_ang, modulos = cart2pol(x, y)
ax_ang += 4*np.pi*wl
return pol2cart(ax_ang, modulos)
x_1= np.cos(np.arange(0 , 2*np.pi , 0.001))
y_1 = np.sin(np.arange(0, 2*np.pi, 0.001) )
fig = plt.figure()
adjustFigAspect(fig,aspect=1)
ax = fig.add_subplot(111)
ax.set_ylim(-1.01 , 1.01)
ax.set_xlim(-1.01, 1.01)
ax.axis('off')
ax.plot(x_1, y_1 , 'k', linewidth = 0.3)
#fig.axhline(y=0, xmin=-0.99, xmax=0.99, color='k', hold=None, linewidth = 0.5)
ax.plot([1, -1], [0, 0], 'k', linewidth = 0.3)
ax.plot([0], [0], 'ko')
#black big lines
for i in np.arange(0.05, 0.2, 0.05):
paint_line(i , ax)
for i in np.arange(0.2, 1, 0.1):
paint_line(i , ax)
for i in np.arange(1, 2, 0.2):
paint_line(i , ax)
for i in np.arange(2, 5, 1):
paint_line(i , ax)
for i in np.array([5, 10, 20, 50]):
paint_line(i , ax)
paint_text_degrees()
paint_text_wavelength()
p1 , p2 = imp2point(0.4, 0.6)
ax.plot(p1, p2, 'ko')
ax.plot([0 ,p1], [0 ,p2], 'r')
start, modd= cart2pol(p1, p2)
p3, p4 = move_wl(p1, p2, 0.2)
ax.plot(p3, p4, 'ko')
end,modd = cart2pol(p3, p4)
data_x = modd*np.cos(np.arange(start , end -2*np.pi, -0.0001))
data_y = modd*np.sin(np.arange(start , end - 2*np.pi, -0.0001))
print(start)
print(end)
ax.plot(data_x, data_y)
i = 0.44
x = i/(1+i) + (1/(1+i)) * np.cos(np.arange(0 , 2*np.pi , 0.001))
y = (1/(1+i)) * np.sin(np.arange(0 , 2*np.pi , 0.001))
ax.plot(x, y, 'r', linewidth = 0.5)
i = 0.68
x = 1 + (1/(-1*i)) * np.cos(np.arange( -np.pi , np.pi, 0.0001))
y = (1/(i*-1))+(1/(i*-1)) * np.sin(np.arange(-np.pi , np.pi, 0.0001))
x_t , y_t = colisionM(1, 1/i, 0, 0, 1, -1/i)
x_f = x[x < 1]
y_f = y[x < 1]
y_f = y_f[x_f > -1]
x_f = x_f[x_f > -1]
x_f = x_f[y_f > y_t ]
y_f = y_f[y_f > y_t ]
ax.plot(x_f, y_f , 'r', linewidth = 0.5)
fig.savefig('images/out1.pdf')
|
gpl-3.0
|
NDManh/numbbo
|
code-postprocessing/bbob_pproc/ppfigparam.py
|
3
|
9956
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Generate ERT vs param. figures.
The figures will show the performance in terms of ERT on a log scale
w.r.t. parameter. On the y-axis, data is represented as
a number of function evaluations. Crosses (+) give the median number of
function evaluations for the smallest reached target function value
(also divided by dimension). Crosses (×) give the average number of
overall conducted function evaluations in case the smallest target
function value (1e-8) was not reached.
"""
from __future__ import absolute_import
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from pdb import set_trace
from . import toolsstats, bestalg
from .ppfig import saveFigure, groupByRange
__all__ = ['beautify', 'plot', 'main']
avgstyle = dict(color='r', marker='x', markersize=20)
medmarker = dict(linestyle='', marker='+', markersize=30, markeredgewidth=5,
zorder=-1)
colors = ('k', 'b', 'c', 'g', 'y', 'm', 'r', 'k', 'k', 'c', 'r', 'm') # sort of rainbow style
styles = [{'color': 'k', 'marker': 'o', 'markeredgecolor': 'k'},
{'color': 'b'},
{'color': 'c', 'marker': 'v', 'markeredgecolor': 'c'},
{'color': 'g'},
{'color': 'y', 'marker': '^', 'markeredgecolor': 'y'},
{'color': 'm'},
{'color': 'r', 'marker': 's', 'markeredgecolor': 'r'}] # sort of rainbow style
refcolor = 'wheat'
# should correspond with the colors in pprldistr.
dimsBBOB = (2, 3, 5, 10, 20, 40)
#Get benchmark short infos.
def read_fun_infos(isBiobjective):
try:
funInfos = {}
filename = 'biobj-benchmarkshortinfos.txt' if isBiobjective else 'benchmarkshortinfos.txt'
infofile = os.path.join(os.path.split(__file__)[0], filename)
f = open(infofile, 'r')
for line in f:
if len(line) == 0 or line.startswith('%') or line.isspace() :
continue
funcId, funcInfo = line[0:-1].split(None, 1)
funInfos[int(funcId)] = funcId + ' ' + funcInfo
f.close()
return funInfos
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
print 'Could not find file', infofile, \
'Titles in figures will not be displayed.'
def beautify():
"""Customize figure presentation."""
# Input checking
# Get axis handle and set scale for each axis
axisHandle = plt.gca()
axisHandle.set_xscale("log")
axisHandle.set_yscale("log")
# Grid options
axisHandle.grid(True)
ymin, ymax = plt.ylim()
xmin, xmax = plt.xlim()
# quadratic and cubic "grid"
#plt.plot((2,200), (1, 1e2), 'k:')
#plt.plot((2,200), (1, 1e4), 'k:')
#plt.plot((2,200), (1e3, 1e5), 'k:')
#plt.plot((2,200), (1e3, 1e7), 'k:')
#plt.plot((2,200), (1e6, 1e8), 'k:')
#plt.plot((2,200), (1e6, 1e10), 'k:')
# axes limits
plt.ylim(ymin=10**-0.2, ymax=ymax) # Set back the previous maximum.
# ticks on axes
# axisHandle.invert_xaxis()
# plt.xlim(1.8, 45) # TODO should become input arg?
# dimticklist = (2, 3, 4, 5, 10, 20, 40) # TODO: should become input arg at some point?
# dimannlist = (2, 3, '', 5, 10, 20, 40) # TODO: should become input arg at some point?
# TODO: All these should depend on one given input (xlim, ylim)
# axisHandle.set_xticks(dimticklist)
# axisHandle.set_xticklabels([str(n) for n in dimannlist])
tmp = axisHandle.get_yticks()
tmp2 = []
for i in tmp:
tmp2.append('%d' % round(np.log10(i)))
axisHandle.set_yticklabels(tmp2)
plt.ylabel('Run Lengths')
def plot(dsList, param='dim', targets=(10., 1., 1e-1, 1e-2, 1e-3, 1e-5, 1e-8)):
"""Generate plot of ERT vs param."""
dictparam = dsList.dictByParam(param)
params = sorted(dictparam) # sorted because we draw lines
# generate plot from dsList
res = []
# collect data
rawdata = {}
for p in params:
assert len(dictparam[p]) == 1
rawdata[p] = dictparam[p][0].detEvals(targets)
# expect dictparam[p] to have only one element
# plot lines for ERT
xpltdata = params
for i, t in enumerate(targets):
ypltdata = []
for p in params:
data = rawdata[p][i]
unsucc = np.isnan(data)
assert len(dictparam[p]) == 1
data[unsucc] = dictparam[p][0].maxevals
# compute ERT
ert, srate, succ = toolsstats.sp(data, issuccessful=(unsucc == False))
ypltdata.append(ert)
res.extend(plt.plot(xpltdata, ypltdata, markersize=20,
zorder=len(targets) - i, **styles[i]))
# for the legend
plt.plot([], [], markersize=10,
label=' %+d' % (np.log10(targets[i])),
**styles[i])
# plot median of successful runs for hardest target with a success
for p in params:
for i, t in enumerate(reversed(targets)): # targets has to be from hardest to easiest
data = rawdata[p][i]
data = data[np.isnan(data) == False]
if len(data) > 0:
median = toolsstats.prctile(data, 50.)[0]
res.extend(plt.plot(p, median, styles[i]['color'], **medmarker))
break
# plot average number of function evaluations for the hardest target
xpltdata = []
ypltdata = []
for p in params:
data = rawdata[p][0] # first target
xpltdata.append(p)
if (np.isnan(data) == False).all():
tmpdata = data.copy()
assert len(dictparam[p]) == 1
tmpdata[np.isnan(data)] = dictparam[p][0].maxevals[np.isnan(data)]
tmp = np.mean(tmpdata)
else:
tmp = np.nan # Check what happens when plotting NaN
ypltdata.append(tmp)
res.extend(plt.plot(xpltdata, ypltdata, **avgstyle))
# display numbers of successes for hardest target where there is still one success
for p in params:
for i, t in enumerate(targets): # targets has to be from hardest to easiest
data = rawdata[p][i]
unsucc = np.isnan(data)
assert len(dictparam[p]) == 1
data[unsucc] = dictparam[p][0].maxevals
# compute ERT
ert, srate, succ = toolsstats.sp(data, issuccessful=(unsucc == False))
if srate == 1.:
break
elif succ > 0:
res.append(plt.text(p, ert * 1.85, "%d" % succ, axes=plt.gca(),
horizontalalignment="center",
verticalalignment="bottom"))
break
return res
def main(dsList, _targets=(10., 1., 1e-1, 1e-2, 1e-3, 1e-5, 1e-8),
param=('dim', 'Dimension'), is_normalized=True, outputdir='.',
verbose=True):
"""Generates figure of ERT vs. param.
This script will generate as many figures as there are functions.
For a given function and a given parameter value there should be
only **one** data set.
Crosses (+) give the median number of function evaluations of
successful trials for the smallest reached target function value.
Crosses (x) give the average number of overall conducted function
evaluations in case the smallest target function value (1e-8) was
not reached.
:keyword DataSetList dsList: data sets
:keyword seq _targets: target precisions
:keyword tuple param: parameter on x-axis. The first element has to
be a string corresponding to the name of an
attribute common to elements of dsList. The
second element has to be a string which will
be used as label for the figures. The values
of attribute param have to be sortable.
:keyword bool is_normalized: if True the y values are normalized by
x values
:keyword string outputdir: name of output directory for the image
files
:keyword bool verbose: controls verbosity
"""
funInfos = read_fun_infos(dsList.isBiobjective())
# TODO check input parameter param
for func, dictfunc in dsList.dictByFunc().iteritems():
filename = os.path.join(outputdir,'ppfigparam_%s_f%03d' % (param[0], func))
try:
targets = list(j[func] for j in _targets)
except TypeError:
targets = _targets
targets = sorted(targets) # from hard to easy
handles = plot(dictfunc, param[0], targets)
# # display best 2009
# if not bestalg.bestalgentries2009:
# bestalg.loadBBOB2009()
# bestalgdata = []
# for d in dimsBBOB:
# entry = bestalg.bestalgentries2009[(d, func)]
# tmp = entry.detERT([1e-8])[0]
# if not np.isinf(tmp):
# bestalgdata.append(tmp/d)
# else:
# bestalgdata.append(None)
# plt.plot(dimsBBOB, bestalgdata, color=refcolor, linewidth=10, zorder=-2)
# plt.plot(dimsBBOB, bestalgdata, ls='', marker='d', markersize=25,
# color=refcolor, markeredgecolor=refcolor, zorder=-2)
a = plt.gca()
if is_normalized:
for i in handles:
try:
plt.setp(i, 'ydata', plt.getp(i, 'ydata') / plt.getp(i, 'xdata'))
except TypeError:
pass
a.relim()
a.autoscale_view()
beautify()
plt.xlabel(param[1])
if is_normalized:
plt.setp(plt.gca(), 'ylabel', plt.getp(a, 'ylabel') + ' / ' + param[1])
if func in (1, 24, 101, 130):
plt.legend(loc="best")
if func in funInfos.keys():
a.set_title(funInfos[func])
saveFigure(filename, verbose=verbose)
plt.close()
|
bsd-3-clause
|
JackKelly/neuralnilm_prototype
|
scripts/e203.py
|
2
|
6772
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
171
lower learning rate
172
even lower learning rate
173
slightly higher learning rate!
175
same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
176
new cost function
177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
178
refactored cost func (functionally equiv to 177)
0.1x learning rate
e180
* mse
e181
* back to scaled cost
* different architecture:
- convd1 at input (2x)
- then 3 LSTM layers, each with a 2x conv in between
- no diff input
e189
* divide dominant appliance power
* mse
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
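# Added note: scaled_cost() averages two MSE terms, one over targets above
# THRESHOLD and one over targets at or below it, so the usually far more
# numerous "off" samples cannot dominate the loss; the ifelse() guard returns
# 0 instead of NaN when one of the masks is empty.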
def exp_a(name):
# global source
# source = RealApplianceSource(
# filename='/data/dk3810/ukdale.h5',
# appliances=[
# ['fridge freezer', 'fridge', 'freezer'],
# 'hair straighteners',
# 'television'
# # 'dish washer',
# # ['washer dryer', 'washing machine']
# ],
# max_appliance_powers=[2500] * 5,
# on_power_thresholds=[5] * 5,
# max_input_power=2500,
# min_on_durations=[60, 60, 60, 1800, 1800],
# min_off_durations=[12, 12, 12, 1800, 600],
# window=("2013-06-01", "2014-07-01"),
# seq_length=1520,
# output_one_appliance=False,
# boolean_targets=False,
# train_buildings=[1],
# validation_buildings=[1],
# skip_probability=0.7,
# n_seq_per_batch=25,
# input_padding=1,
# include_diff=False,
# clip_appliance_power=False
# )
net = Net(
experiment_name=name,
source=source,
save_plot_interval=1000,
loss_function=scaled_cost,
updates=partial(nesterov_momentum, learning_rate=0.1, clip_range=(-1, 1)),
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 10,
'filter_length': 2,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(1)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None
# 'W': Uniform()
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
raise
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
|
mit
|
chugunovyar/factoryForBuild
|
env/lib/python2.7/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py
|
18
|
26105
|
"""
Experimental support for curvilinear grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from itertools import chain
from .grid_finder import GridFinder
from .axislines import AxisArtistHelper, GridHelperBase
from .axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from matplotlib.path import Path
class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
"""
Helper class for a fixed axis.
"""
def __init__(self, grid_helper, side, nth_coord_ticks=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FixedAxisArtistHelper, self).__init__( \
loc=side,
)
self.grid_helper = grid_helper
if nth_coord_ticks is None:
nth_coord_ticks = self.nth_coord
self.nth_coord_ticks = nth_coord_ticks
self.side = side
self._limits_inverted = False
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
if self.nth_coord == 0:
xy1, xy2 = axes.get_ylim()
else:
xy1, xy2 = axes.get_xlim()
if xy1 > xy2:
self._limits_inverted = True
else:
self._limits_inverted = False
def change_tick_coord(self, coord_number=None):
if coord_number is None:
self.nth_coord_ticks = 1 - self.nth_coord_ticks
elif coord_number in [0, 1]:
self.nth_coord_ticks = coord_number
else:
raise Exception("wrong coord number")
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
g = self.grid_helper
if self._limits_inverted:
side = {"left":"right","right":"left",
"top":"bottom", "bottom":"top"}[self.side]
else:
side = self.side
ti1 = g.get_tick_iterator(self.nth_coord_ticks, side)
ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, side, minor=True)
#ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
return chain(ti1, ti2), iter([])
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FloatingAxisArtistHelper, self).__init__(nth_coord,
value,
)
self.value = value
self.grid_helper = grid_helper
self._extremes = None, None
self._get_line_path = None # a method that returns a Path.
self._line_num_points = 100 # number of points to create a line
def set_extremes(self, e1, e2):
self._extremes = e1, e2
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
grid_finder = self.grid_helper.grid_finder
extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
x1, y1, x2, y2)
extremes = list(extremes)
e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
if e1 is not None:
extremes[2] = max(e1, extremes[2])
if e2 is not None:
extremes[3] = min(e2, extremes[3])
elif self.nth_coord == 1:
if e1 is not None:
extremes[0] = max(e1, extremes[0])
if e2 is not None:
extremes[1] = min(e2, extremes[1])
grid_info = dict()
lon_min, lon_max, lat_min, lat_max = extremes
lon_levs, lon_n, lon_factor = \
grid_finder.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = \
grid_finder.grid_locator2(lat_min, lat_max)
grid_info["extremes"] = extremes
grid_info["lon_info"] = lon_levs, lon_n, lon_factor
grid_info["lat_info"] = lat_levs, lat_n, lat_factor
grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
lon_factor,
lon_levs)
grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
lat_factor,
lat_levs)
grid_finder = self.grid_helper.grid_finder
#e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
xx0 = np.linspace(self.value, self.value, self._line_num_points)
yy0 = np.linspace(extremes[2], extremes[3], self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
elif self.nth_coord == 1:
xx0 = np.linspace(extremes[0], extremes[1], self._line_num_points)
yy0 = np.linspace(self.value, self.value, self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
grid_info["line_xy"] = xx, yy
self.grid_info = grid_info
def get_axislabel_transform(self, axes):
return Affine2D() #axes.transData
def get_axislabel_pos_angle(self, axes):
extremes = self.grid_info["extremes"]
if self.nth_coord == 0:
xx0 = self.value
yy0 = (extremes[2]+extremes[3])/2.
dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
elif self.nth_coord == 1:
xx0 = (extremes[0]+extremes[1])/2.
yy0 = self.value
dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
grid_finder = self.grid_helper.grid_finder
xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
else:
return None, None
def get_tick_transform(self, axes):
return IdentityTransform() #axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label, (optionally) tick_label"""
grid_finder = self.grid_helper.grid_finder
lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
lat_levs = np.asarray(lat_levs)
if lat_factor is not None:
yy0 = lat_levs / lat_factor
dy = 0.01 / lat_factor
else:
yy0 = lat_levs
dy = 0.01
lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
lon_levs = np.asarray(lon_levs)
if lon_factor is not None:
xx0 = lon_levs / lon_factor
dx = 0.01 / lon_factor
else:
xx0 = lon_levs
dx = 0.01
if None in self._extremes:
e0, e1 = self._extremes
else:
e0, e1 = sorted(self._extremes)
if e0 is None:
e0 = -np.inf
if e1 is None:
e1 = np.inf
if self.nth_coord == 0:
mask = (e0 <= yy0) & (yy0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
yy0 = yy0[mask]
elif self.nth_coord == 1:
mask = (e0 <= xx0) & (xx0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
xx0 = xx0[mask]
def transform_xy(x, y):
x1, y1 = grid_finder.transform_xy(x, y)
x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
x2, y2 = x2y2.transpose()
return x2, y2
# find angles
if self.nth_coord == 0:
xx0 = np.empty_like(yy0)
xx0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx1a, yy1a = transform_xy(xx00, yy0)
xx1b, yy1b = transform_xy(xx00+dx, yy0)
xx2a, yy2a = transform_xy(xx0, yy0)
xx2b, yy2b = transform_xy(xx0, yy0+dy)
labels = self.grid_info["lat_labels"]
labels = [l for l, m in zip(labels, mask) if m]
elif self.nth_coord == 1:
yy0 = np.empty_like(xx0)
yy0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx1a, yy1a = transform_xy(xx0, yy0)
xx1b, yy1b = transform_xy(xx0, yy0+dy)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx2a, yy2a = transform_xy(xx00, yy0)
xx2b, yy2b = transform_xy(xx00+dx, yy0)
labels = self.grid_info["lon_labels"]
labels = [l for l, m in zip(labels, mask) if m]
def f1():
dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd1 is not defined
dd[mm] = dd2[mm]+3.14159/2.
#dd = np.arctan2(yy2-yy1, xx2-xx1) # angle normal
#dd2 = np.arctan2(yy3-yy1, xx3-xx1) # angle tangent
#mm = ((yy2-yy1)==0.) & ((xx2-xx1)==0.) # mask where dd1 is not defined
#dd[mm] = dd2[mm]+3.14159/2.
#dd += 3.14159
#dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
c2 = tr2ax.transform_point((x, y))
delta=0.00001
if (0. -delta<= c2[0] <= 1.+delta) and \
(0. -delta<= c2[1] <= 1.+delta):
d1 = d/3.14159*180.
d2 = d2/3.14159*180.
yield [x, y], d1, d2, lab
return f1(), iter([])
def get_line_transform(self, axes):
return axes.transData
def get_line(self, axes):
self.update_lim(axes)
x, y = self.grid_info["line_xy"]
if self._get_line_path is None:
return Path(list(zip(x, y)))
else:
return self._get_line_path(axes, x, y)
class GridHelperCurveLinear(GridHelperBase):
def __init__(self, aux_trans,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
aux_trans : a transform from the source (curved) coordinate to
target (rectilinear) coordinate. An instance of MPL's Transform
(inverse transform should be defined) or a tuple of two callable
objects which define the transform and its inverse. The callables
need to take two arrays of source coordinates and
should return two arrays of target coordinates:
e.g., x2, y2 = trans(x1, y1)
"""
super(GridHelperCurveLinear, self).__init__()
self.grid_info = None
self._old_values = None
#self._grid_params = dict()
self._aux_trans = aux_trans
self.grid_finder = GridFinder(aux_trans,
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1,
tick_formatter2)
def update_grid_finder(self, aux_trans=None, **kw):
if aux_trans is not None:
self.grid_finder.update_transform(aux_trans)
self.grid_finder.update(**kw)
self.invalidate()
def _update(self, x1, x2, y1, y2):
"bbox in 0-based image coordinates"
# update wcsgrid
if self.valid() and self._old_values == (x1, x2, y1, y2):
return
self._update_grid(x1, y1, x2, y2)
self._old_values = (x1, x2, y1, y2)
self._force_update = False
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None):
if axes is None:
axes = self.axes
if axis_direction is None:
axis_direction = loc
_helper = FixedAxisArtistHelper(self, loc,
#nth_coord,
nth_coord_ticks=nth_coord,
)
axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
return axisline
def new_floating_axis(self, nth_coord,
value,
axes=None,
axis_direction="bottom"
):
if axes is None:
axes = self.axes
_helper = FloatingAxisArtistHelper( \
self, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
#_helper = FloatingAxisArtistHelper(self, nth_coord,
# value,
# label_direction=label_direction,
# )
#axisline = AxisArtistFloating(axes, _helper,
# axis_direction=axis_direction)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
#axisline.major_ticklabels.set_visible(True)
#axisline.minor_ticklabels.set_visible(False)
#axisline.major_ticklabels.set_rotate_along_line(True)
#axisline.set_rotate_label_along_line(True)
return axisline
def _update_grid(self, x1, y1, x2, y2):
self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
def get_gridlines(self, which="major", axis="both"):
grid_lines = []
if axis in ["both", "x"]:
for gl in self.grid_info["lon"]["lines"]:
grid_lines.extend(gl)
if axis in ["both", "y"]:
for gl in self.grid_info["lat"]["lines"]:
grid_lines.extend(gl)
return grid_lines
def get_tick_iterator(self, nth_coord, axis_side, minor=False):
#axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
#angle = [0, 90, 180, 270][axisnr]
lon_or_lat = ["lon", "lat"][nth_coord]
if not minor: # major ticks
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, l
else:
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, ""
#for xy, a, l in self.grid_info[lon_or_lat]["ticks"][axis_side]:
# yield xy, a, ""
return f()
def test3():
import numpy as np
from matplotlib.transforms import Transform
from matplotlib.path import Path
class MyTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y-x), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MyTransformInv(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class MyTransformInv(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y+x), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MyTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
tr = MyTransform(1)
grid_helper = GridHelperCurveLinear(tr)
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot_class_factory
from .axislines import Axes
SubplotHost = host_subplot_class_factory(Axes)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
fig.add_subplot(ax1)
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
ax1.parasites.append(ax2)
ax2.plot([3, 6], [5.0, 10.])
ax1.set_aspect(1.)
ax1.set_xlim(0, 10)
ax1.set_ylim(0, 10)
ax1.grid(True)
plt.draw()
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost, \
ParasiteAxesAuxTrans
import matplotlib.cbook as cbook
# PolarAxes.PolarTransform takes radians. However, we want our coordinate
# system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(5)
# Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
# And also use an appropriate formatter. Note that the
# acceptable Locator and Formatter classes are a bit different from
# mpl's, and you cannot directly use mpl's Locator and
# Formatter here (but that may become possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
# let right axis shows ticklabels for 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
# let bottom axis shows ticklabels for 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat"] = axis = grid_helper.new_floating_axis(0, 60, axes=ax1)
axis.label.set_text("Test")
axis.label.set_visible(True)
#axis._extremes = 2, 10
#axis.label.set_text("Test")
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.get_helper()._extremes=2, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 6, axes=ax1)
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.label.set_text("Test 2")
axis.get_helper()._extremes=-180, 90
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
# Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
def curvelinear_test3(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1, axis
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
# PolarAxes.PolarTransform takes radians. However, we want our coordinate
# system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
# Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
# And also use an appropriate formatter. Note that the
# acceptable Locator and Formatter classes are a bit different from
# mpl's, and you cannot directly use mpl's Locator and
# Formatter here (but that may become possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
for axis in list(six.itervalues(ax1.axis)):
axis.set_visible(False)
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat1"] = axis = grid_helper.new_floating_axis(0, 130,
axes=ax1,
axis_direction="left"
)
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
grid_helper = ax1.get_grid_helper()
ax1.axis["lat2"] = axis = grid_helper.new_floating_axis(0, 50, axes=ax1,
axis_direction="right")
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 10,
axes=ax1,
axis_direction="bottom")
axis.label.set_text("Test 2")
axis.get_helper()._extremes= 50, 130
axis.major_ticklabels.set_axis_direction("top")
axis.label.set_axis_direction("top")
grid_helper.grid_finder.grid_locator1.den = 5
grid_helper.grid_finder.grid_locator2._nbins = 5
# # A parasite axes with given transform
# ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# # note that ax2.transData == tr + ax1.transData
# # Anything you draw in ax2 will match the ticks and grids of ax1.
# ax1.parasites.append(ax2)
# intp = cbook.simple_linear_interpolation
# ax2.plot(intp(np.array([0, 30]), 50),
# intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(5, 5))
fig.clf()
#test3()
#curvelinear_test2(fig)
curvelinear_test3(fig)
#plt.draw()
plt.show()
|
gpl-3.0
|
quheng/scikit-learn
|
sklearn/utils/arpack.py
|
265
|
64837
|
"""
This contains a copy of a future version of
scipy.sparse.linalg.eigen.arpack.eigsh.
It is an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh().
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interfaces (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver.
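# Illustrative usage sketch (added commentary, not part of the original
# module). Assuming a small sparse symmetric matrix, the eigsh() defined
# below would return the eigenvalues closest to sigma via shift-invert:
#
# import numpy as np
# from scipy.sparse import diags
# A = diags(np.arange(1.0, 11.0)) # 10x10 diagonal test matrix
# vals, vecs = eigsh(A, k=3, sigma=0.0, which='LM')
# # vals ~= [1., 2., 3.], the three eigenvalues nearest sigma=0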
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
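# Added illustration (hedged sketch, not original code): for mode=3 with
# M = identity, a caller could build Minv_matvec from a sparse LU
# factorisation of the shifted matrix using splu/identity imported above:
# OPinv = splu((A - sigma * identity(n)).tocsc()).solve
# params = _SymmetricArpackParams(n, k, A.dtype.char, matvec=None,
# mode=3, sigma=sigma, Minv_matvec=OPinv)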
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
        #    matvec = left multiplication by A (stored for use in extract)
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
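        #
        # Worked illustration for real A with complex sigma (assumed values):
        # in mode 3 ARPACK iterates with real(Minv_matvec), so it effectively
        # works with the transformed values
        #     w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ],
        # and the true eigenvalues w[i] are recovered later in extract() from
        # Rayleigh quotients computed with self.matvec.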
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
                        # this is a complex conjugate pair; the real and
                        # imaginary parts of its eigenvector are stored in
                        # consecutive columns of zr
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
                # we got at most as many eigenvalues as we asked for
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
    using a sparse LU decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
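# A minimal, hedged sketch of the shifted solve performed by IterOpInv above,
# written directly against scipy's gmres so it is self-contained. The matrix,
# shift and function name are made up for illustration; nothing here is called
# by the module.
def _demo_shifted_iterative_solve():
    import numpy as np
    from scipy.sparse.linalg import aslinearoperator, gmres
    A = np.diag([1.0, 2.0, 3.0])
    sigma = 0.5
    op = aslinearoperator(A - sigma * np.eye(3))  # the operator [A - sigma*I]
    b = np.ones(3)
    x, info = gmres(op, b)  # info == 0 signals convergence
    return x, info          # x approximately solves [A - sigma*I] x = b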
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
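# A minimal, hedged sketch (illustration only; the matrix, shift and function
# name are assumptions, not part of the original module) of the direct
# shift-invert factorization that get_OPinv_matvec builds for an explicit
# sparse A with M = None. Eigenvalues w of A near sigma become the dominant
# eigenvalues 1/(w - sigma) of the inverted operator, which is why ARPACK
# converges to them quickly.
def _demo_shift_invert_factorization():
    import numpy as np
    from scipy.sparse import csc_matrix, identity
    from scipy.sparse.linalg import splu
    A = csc_matrix(np.diag([1.0, 2.0, 3.0, 4.0]))
    sigma = 2.1
    lu = splu((A - sigma * identity(4)).tocsc())  # factor [A - sigma*I] once
    x = np.ones(4)
    return lu.solve(x)  # applies [A - sigma*I]^-1 to x, as SpLuInv does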
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
return_eigenvectors : boolean, default True
Whether to return the eigenvectors along with the eigenvalues.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
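# A minimal, hedged usage sketch (illustration only; the matrix and shift are
# assumed). scipy's eigs is imported directly so the sketch stays
# self-contained; the backported _eigs above exposes the same signature.
# Shift-invert returns the eigenvalues of A closest to sigma, here the
# diagonal entries nearest 3.1.
def _demo_eigs_shift_invert():
    import numpy as np
    from scipy.sparse import diags
    from scipy.sparse.linalg import eigs as scipy_eigs
    A = diags(np.arange(1.0, 11.0)).tocsc()
    vals = scipy_eigs(A, k=3, sigma=3.1, return_eigenvectors=False)
    return np.sort(vals.real)  # approximately [2., 3., 4.]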
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
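# A minimal, hedged usage sketch (illustration only; the matrix is assumed).
# scipy's eigsh is imported directly so the sketch stays self-contained; the
# backported _eigsh above exposes the same signature. The smallest eigenvalues
# of a symmetric positive-definite matrix are computed once with which='SA'
# and once through shift-invert around sigma=0, which typically converges in
# far fewer Lanczos iterations for small eigenvalues.
def _demo_eigsh_smallest():
    import numpy as np
    from scipy.sparse import diags
    from scipy.sparse.linalg import eigsh as scipy_eigsh
    n = 50
    A = diags([-np.ones(n - 1), 2 * np.ones(n), -np.ones(n - 1)],
              offsets=[-1, 0, 1]).tocsc()  # 1-D Laplacian, SPD
    w_direct = scipy_eigsh(A, k=3, which='SA', return_eigenvectors=False)
    w_shift = scipy_eigsh(A, k=3, sigma=0, return_eigenvectors=False)
    return np.sort(w_direct), np.sort(w_shift)  # the two should agree closely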
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
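# A minimal, hedged usage sketch (illustration only; the random matrix and the
# function name are assumptions). scipy's svds is imported directly so the
# sketch stays self-contained; the backported _svds above computes the same
# quantities through eigsh/eigs on X.H * X or X * X.H. The leading singular
# values should match those of a dense SVD.
def _demo_svds_against_dense():
    import numpy as np
    from scipy.sparse.linalg import svds as scipy_svds
    rng = np.random.RandomState(0)
    X = rng.rand(20, 8)
    u, s, vh = scipy_svds(X, k=2)
    s_dense = np.linalg.svd(X, compute_uv=False)[:2]
    return np.sort(s), np.sort(s_dense)  # should agree to high precision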
# check if backport is actually needed:
if LooseVersion(scipy.version.version) >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
|
bsd-3-clause
|
JeanKossaifi/scikit-learn
|
examples/covariance/plot_sparse_cov.py
|
300
|
5078
|
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. Moreover, with a small
number of observations, it is easier to recover a correlation matrix
than a covariance matrix, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision matrix. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground-truth values, as can be seen in the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the
model, is chosen by internal cross-validation in GraphLassoCV. As can be
seen in figure 2, the grid on which the cross-validation score is computed
is iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
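# A small, hedged addition (not part of the original example): quantify the
# claim made in the module docstring that the l1 estimator detects too many
# non-zero coefficients, by comparing sparsity patterns with the ground truth.
nonzero_true = np.abs(prec) > 1e-10
nonzero_est = np.abs(prec_) > 1e-10
print("True non-zeros: %d, GraphLasso non-zeros: %d, overlap: %d"
      % (nonzero_true.sum(), nonzero_est.sum(),
         (nonzero_true & nonzero_est).sum()))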
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
|
bsd-3-clause
|
TakayukiSakai/tensorflow
|
tensorflow/examples/skflow/boston.py
|
4
|
1596
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, cross_validation, metrics
from sklearn import preprocessing
from tensorflow.contrib import skflow
# Load dataset
boston = datasets.load_boston()
X, y = boston.data, boston.target
# Split dataset into train / test
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,
test_size=0.2, random_state=42)
# Scale data (training set) to 0 mean and unit standard deviation.
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = skflow.TensorFlowDNNRegressor(hidden_units=[10, 10],
steps=5000, learning_rate=0.1, batch_size=1)
# Fit
regressor.fit(X_train, y_train)
# Predict and score
score = metrics.mean_squared_error(regressor.predict(scaler.transform(X_test)), y_test)
print('MSE: {0:f}'.format(score))
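# A hedged addition (not part of the original example): an ordinary
# least-squares baseline on the same scaled features, for comparison with the
# DNN regressor above.
from sklearn.linear_model import LinearRegression
baseline = LinearRegression().fit(X_train, y_train)
baseline_mse = metrics.mean_squared_error(
    baseline.predict(scaler.transform(X_test)), y_test)
print('Baseline linear regression MSE: {0:f}'.format(baseline_mse))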
|
apache-2.0
|
maheshakya/scikit-learn
|
sklearn/utils/testing.py
|
2
|
22085
|
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Py 2.6
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except Exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("Should have raised %r" %
expected_exception(expected_regexp))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex, but let's keep the backward compatibility in scikit-learn
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [warning.category is warning_class for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
        # Check the messages of all warnings that belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>."
% (message, func.__name__))
return result
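# A minimal, hedged usage sketch (not part of the original module): how
# assert_warns_message is typically called from a test. The deprecated_func
# below is a made-up example.
def _demo_assert_warns_message():
    def deprecated_func():
        warnings.warn("use new_func instead", DeprecationWarning)
        return 42
    # Passes because a DeprecationWarning whose message contains "new_func"
    # is raised, and returns deprecated_func's return value (42).
    return assert_warns_message(DeprecationWarning, "new_func", deprecated_func)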
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exception, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions"""
try:
function(*args, **kwargs)
raise AssertionError("Should have raised %r" % exception(message))
except exception as e:
error_message = str(e)
assert_in(message, error_message)
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict: contains data as
columns_dict[column_name] = array of data
dataname: name of data set
matfile: file-like object or file name
ordering: list of column_names, determines the ordering in the data set
Note: this function transposes all arrays, while fetch_mldata only
    transposes 'data'; keep that in mind in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
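# A minimal, hedged usage sketch (not part of the original module): serving a
# fake dataset to fetch_mldata inside a test. The dataset name and columns are
# made up for illustration; no network access happens.
def _demo_mock_mldata_usage():
    import numpy as np
    install_mldata_mock({'some-fake-dataset': {'data': np.ones((5, 3)),
                                               'label': np.arange(5)}})
    try:
        # the code under test would call fetch_mldata('some-fake-dataset') here
        pass
    finally:
        uninstall_mldata_mock()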
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that cannot be default-constructed sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer',
'TfidfTransformer', 'IsotonicRegression', 'OneHotEncoder',
'RandomTreesEmbedding', 'FeatureHasher', 'DummyClassifier',
'DummyRegressor', 'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash']
def all_estimators(include_meta_estimators=False, include_other=False,
type_filter=None, include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and
        cannot be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion, GridSearchCV and RandomizedSearchCV.
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
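# A minimal, hedged usage sketch (not part of the original module): collect the
# names of every classifier discovered by all_estimators.
def _demo_all_estimators_usage():
    return [name for name, cls in all_estimators(type_filter='classifier')]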
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
matplotlib.pylab.figure()
except:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
|
bsd-3-clause
|
crisbarros/trading-with-python
|
cookbook/reconstructVXX/reconstructVXX.py
|
77
|
3574
|
# -*- coding: utf-8 -*-
"""
Reconstructing VXX from futures data
author: Jev Kuznetsov
License : BSD
"""
from __future__ import division
from pandas import *
import numpy as np
import os
class Future(object):
""" vix future class, used to keep data structures simple """
def __init__(self,series,code=None):
""" code is optional, example '2010_01' """
self.series = series.dropna() # price data
self.settleDate = self.series.index[-1]
self.dt = len(self.series) # roll period (this is default, should be recalculated)
self.code = code # string code 'YYYY_MM'
def monthNr(self):
""" get month nr from the future code """
return int(self.code.split('_')[1])
def dr(self,date):
""" days remaining before settlement, on a given date """
return(sum(self.series.index>date))
def price(self,date):
""" price on a date """
return self.series.get_value(date)
def returns(df):
""" daily return """
return (df/df.shift(1)-1)
def reconstructVXX():
"""
calculate VXX returns
needs a previously preprocessed file vix_futures.csv
"""
dataDir = os.path.expanduser('~')+'/twpData'
X = DataFrame.from_csv(dataDir+'/vix_futures.csv') # raw data table
# build end dates list & futures classes
futures = []
codes = X.columns
endDates = []
for code in codes:
f = Future(X[code],code=code)
print code,':', f.settleDate
endDates.append(f.settleDate)
futures.append(f)
endDates = np.array(endDates)
# set roll period of each future
for i in range(1,len(futures)):
futures[i].dt = futures[i].dr(futures[i-1].settleDate)
# Y is the result table
idx = X.index
Y = DataFrame(index=idx, columns=['first','second','days_left','w1','w2',
'ret','30days_avg'])
# W is the weight matrix
W = DataFrame(data = np.zeros(X.values.shape),index=idx,columns = X.columns)
# for VXX calculation see http://www.ipathetn.com/static/pdf/vix-prospectus.pdf
# page PS-20
for date in idx:
        i = np.nonzero(endDates>=date)[0][0] # find first not expired future
first = futures[i] # first month futures class
second = futures[i+1] # second month futures class
dr = first.dr(date) # number of remaining dates in the first futures contract
dt = first.dt #number of business days in roll period
W.set_value(date,codes[i],100*dr/dt)
W.set_value(date,codes[i+1],100*(dt-dr)/dt)
# this is all just debug info
p1 = first.price(date)
p2 = second.price(date)
w1 = 100*dr/dt
w2 = 100*(dt-dr)/dt
Y.set_value(date,'first',p1)
Y.set_value(date,'second',p2)
Y.set_value(date,'days_left',first.dr(date))
Y.set_value(date,'w1',w1)
Y.set_value(date,'w2',w2)
Y.set_value(date,'30days_avg',(p1*w1+p2*w2)/100)
valCurr = (X*W.shift(1)).sum(axis=1) # value on day N
valYest = (X.shift(1)*W.shift(1)).sum(axis=1) # value on day N-1
Y['ret'] = valCurr/valYest-1 # index return on day N
return Y
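# Worked example (illustrative numbers, not taken from the data): with a roll
# period of dt = 21 business days and dr = 14 days left on the front contract,
# reconstructVXX assigns
#     w1 = 100*14/21      ~ 66.7   (front month)
#     w2 = 100*(21-14)/21 ~ 33.3   (second month)
# and '30days_avg' = (p1*w1 + p2*w2)/100, i.e. a dr/dt-weighted blend of the
# two futures prices that shifts from the front to the second month as
# settlement approaches.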
##-------------------Main script---------------------------
if __name__=="__main__":
    Y = reconstructVXX()
print Y.head(30)#
Y.to_csv('reconstructedVXX.csv')
|
bsd-3-clause
|
yavalvas/yav_com
|
build/matplotlib/lib/mpl_examples/pylab_examples/trigradient_demo.py
|
7
|
3075
|
"""
Demonstrates computation of gradient with matplotlib.tri.CubicTriInterpolator.
"""
from matplotlib.tri import Triangulation, UniformTriRefiner,\
CubicTriInterpolator
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import math
#-----------------------------------------------------------------------------
# Electrical potential of a dipole
#-----------------------------------------------------------------------------
def dipole_potential(x, y):
""" The electric dipole potential V """
r_sq = x**2 + y**2
theta = np.arctan2(y, x)
z = np.cos(theta)/r_sq
return (np.max(z)-z) / (np.max(z)-np.min(z))
#-----------------------------------------------------------------------------
# Creating a Triangulation
#-----------------------------------------------------------------------------
# First create the x and y coordinates of the points.
n_angles = 30
n_radii = 10
min_radius = 0.2
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0, 2*math.pi, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += math.pi/n_angles
x = (radii*np.cos(angles)).flatten()
y = (radii*np.sin(angles)).flatten()
V = dipole_potential(x, y)
# Create the Triangulation; no triangles specified so Delaunay triangulation
# created.
triang = Triangulation(x, y)
# Mask off unwanted triangles.
xmid = x[triang.triangles].mean(axis=1)
ymid = y[triang.triangles].mean(axis=1)
mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)
triang.set_mask(mask)
#-----------------------------------------------------------------------------
# Refine data - interpolates the electrical potential V
#-----------------------------------------------------------------------------
refiner = UniformTriRefiner(triang)
tri_refi, z_test_refi = refiner.refine_field(V, subdiv=3)
#-----------------------------------------------------------------------------
# Computes the electrical field (Ex, Ey) as gradient of electrical potential
#-----------------------------------------------------------------------------
tci = CubicTriInterpolator(triang, -V)
# Gradient requested here at the mesh nodes but could be anywhere else:
(Ex, Ey) = tci.gradient(triang.x, triang.y)
E_norm = np.sqrt(Ex**2 + Ey**2)
#-----------------------------------------------------------------------------
# Plot the triangulation, the potential iso-contours and the vector field
#-----------------------------------------------------------------------------
plt.figure()
plt.gca().set_aspect('equal')
plt.triplot(triang, color='0.8')
levels = np.arange(0., 1., 0.01)
cmap = cm.get_cmap(name='hot', lut=None)
plt.tricontour(tri_refi, z_test_refi, levels=levels, cmap=cmap,
linewidths=[2.0, 1.0, 1.0, 1.0])
# Plots direction of the electrical vector field
plt.quiver(triang.x, triang.y, Ex/E_norm, Ey/E_norm,
units='xy', scale=10., zorder=3, color='blue',
width=0.007, headwidth=3., headlength=4.)
plt.title('Gradient plot: an electrical dipole')
plt.show()
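# Sanity-check sketch (illustration only, not part of the demo): for a linear
# field the cubic interpolator should recover a constant gradient, e.g.
#     tci_lin = CubicTriInterpolator(triang, 2.0*triang.x + 3.0*triang.y)
#     gx, gy = tci_lin.gradient(triang.x, triang.y)   # ~2.0 and ~3.0 everywhere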
|
mit
|
iulian787/spack
|
var/spack/repos/builtin/packages/py-torch-geometric/package.py
|
2
|
3179
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTorchGeometric(PythonPackage):
"""PyTorch Geometric (PyG) is a geometric deep learning extension
library for PyTorch. It consists of various methods for deep
learning on graphs and other irregular structures, also known as
geometric deep learning, from a variety of published papers. In
addition, it consists of an easy-to-use mini-batch loader for many
small and single giant graphs, multi gpu-support, a large number
of common benchmark datasets (based on simple interfaces to create
your own), and helpful transforms, both for learning on arbitrary
graphs as well as on 3D meshes or point clouds."""
homepage = "https://github.com/rusty1s/pytorch_geometric"
url = "https://github.com/rusty1s/pytorch_geometric/archive/1.6.0.tar.gz"
version('1.6.0', sha256='7d5231cdcc2ebd4444f406cbf1537eb49bf90ab6f446eaf1b7af5cdbe105f3c9')
variant('cuda', default=False, description="Enable CUDA support")
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-pytest-runner', type='build')
depends_on('py-torch', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-tqdm', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-networkx', type=('build', 'run'))
depends_on('py-scikit-learn', type=('build', 'run'))
depends_on('py-numba', type=('build', 'run'))
depends_on('py-requests', type=('build', 'run'))
depends_on('py-pandas', type=('build', 'run'))
depends_on('py-rdflib', type=('build', 'run'))
depends_on('py-googledrivedownloader', type=('build', 'run'))
depends_on('py-h5py~mpi', type=('build', 'run'))
depends_on('py-ase', type=('build', 'run'))
depends_on('py-jinja2', type=('build', 'run'))
depends_on('py-torch-sparse+cuda', when='+cuda', type=('build', 'run'))
depends_on('py-torch-scatter+cuda', when='+cuda', type=('build', 'run'))
depends_on('py-torch-cluster+cuda', when='+cuda', type=('build', 'run'))
depends_on('py-torch-spline-conv+cuda', when='+cuda', type=('build', 'run'))
depends_on('py-torch-sparse~cuda', when='~cuda', type=('build', 'run'))
depends_on('py-torch-scatter~cuda', when='~cuda', type=('build', 'run'))
depends_on('py-torch-cluster~cuda', when='~cuda', type=('build', 'run'))
depends_on('py-torch-spline-conv~cuda', when='~cuda', type=('build', 'run'))
def setup_build_environment(self, env):
if '+cuda' in self.spec:
cuda_arches = list(
self.spec['py-torch'].variants['cuda_arch'].value)
for i, x in enumerate(cuda_arches):
cuda_arches[i] = '{0}.{1}'.format(x[0:-1], x[-1])
env.set('TORCH_CUDA_ARCH_LIST', str.join(' ', cuda_arches))
env.set('FORCE_CUDA', '1')
env.set('CUDA_HOME', self.spec['cuda'].prefix)
else:
env.set('FORCE_CUDA', '0')
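    # Illustration (hypothetical variant values): the loop above rewrites
    # cuda_arch entries into the dotted form TORCH_CUDA_ARCH_LIST expects,
    #     '70' -> '7.0',   '75' -> '7.5',   '100' -> '10.0'
    # i.e. '{0}.{1}'.format(x[0:-1], x[-1]) splits off the last digit.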
|
lgpl-2.1
|
gdetor/SI-RF-Structure
|
Responses/DNF-2D-REF-Response.py
|
1
|
5461
|
# Copyright (c) 2014, Georgios Is. Detorakis ([email protected]) and
# Nicolas P. Rougier ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompany the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# DNF-2D-SOM-REF.py computes the responses of the model described in [1]
# in normal case.
'''
Model response. Calculates the response of the DNF-SOM-MultiDimInput
model.
'''
import math as mt
import numpy as np
from sys import stdout
import matplotlib.pyplot as plt
from numpy.fft import rfft2, irfft2, ifftshift
def progress_bar(width, percent):
marks = mt.floor(width * (percent/100.0))
spaces = mt.floor(width - marks)
loader = '[' + ('#' * int(marks)) + (' ' * int(spaces)) + ']'
stdout.write("%s %d%%\r" % (loader, percent))
if percent >= 100:
stdout.write("\n")
stdout.flush()
def g(x, sigma=1.0):
return np.exp(-0.5*(x/sigma)**2)
def area_of_activity(data):
return sum(1 for i in data.flatten() if i > 0.0)
def dnfsom_activity(n, Rn, l, tau, T, alpha, folder):
ms = 0.001 # ms definition
dt = 35.0 * ms # Euler's time step
# Files to be loaded
filename = 'weights050000.npy'
filenames = 'model_response_64_final'
# Allocation of arrays and loading necessary files
O = np.zeros((l*n, l*n))
W = np.load(folder+filename)
Rx = np.load(folder+'gridxcoord.npy')
Ry = np.load(folder+'gridycoord.npy')
V = np.random.random((n, n)) * .01
U = np.random.random((n, n)) * .01
# FFT implementation
mean = 0.5
Ke, Ki = 3.65, 2.40
sigma_e, sigma_i = 0.1, 1.0
x_inf, x_sup, y_inf, y_sup = 0.0, 1.0, 0.0, 1.0
X, Y = np.meshgrid(np.linspace(x_inf, x_sup, n+1)[1:],
np.linspace(y_inf, y_sup, n+1)[1:])
D = np.sqrt((X-mean)**2 + (Y-mean)**2)
We = Ke * g(D, sigma_e) * alpha * 960.0/(n*n)
Wi = Ki * g(D, sigma_i) * alpha * 960.0/(n*n)
We_fft = rfft2(ifftshift(We[::-1, ::-1]))
Wi_fft = rfft2(ifftshift(Wi[::-1, ::-1]))
# Stimuli generation
S = np.zeros((l*l, 2))
for i, x in enumerate(np.linspace(0.0, 1.0, l)):
for j, y in enumerate(np.linspace(0.0, 1.0, l)):
S[i*l+j, 0] = x
S[i*l+j, 1] = y
dX = np.abs(Rx.reshape(1, Rn*Rn) - S[:, 0].reshape(l*l, 1))
dX = np.minimum(dX, 1-dX)
dY = np.abs(Ry.reshape(1, Rn*Rn) - S[:, 1].reshape(l*l, 1))
dY = np.minimum(dY, 1-dY)
samples = np.sqrt(dX*dX+dY*dY)/mt.sqrt(2.0)
samples = g(samples, 0.08)
# Calculation of model response
step = 0
    jj = 100.0/(float(l*l))  # one progress increment per (i, j) stimulus, l*l in total
for i in range(l):
for j in range(l):
D = ((np.abs(W - samples[i*l+j])).sum(axis=-1))/float(Rn*Rn)
I = (1.0 - D.reshape(n, n)) * alpha
for k in range(int(T/dt)):
Z = rfft2(V)
Le = irfft2(Z * We_fft, (n, n)).real
Li = irfft2(Z * Wi_fft, (n, n)).real
U += (-U + (Le - Li) + I) * tau * dt
V = np.maximum(U, 0)
O[i*n:(i+1)*n, j*n:(j+1)*n] = V
V = np.random.random((n, n)) * .01
U = np.random.random((n, n)) * .01
step += jj
progress_bar(30, step)
np.save(folder+filenames, O)
plt.imshow(O, interpolation='bicubic', cmap=plt.cm.hot,
extent=[0, l*n, 0, l*n])
plt.xticks(np.arange(0, l*n, n), [])
plt.yticks(np.arange(0, l*n, n), [])
plt.show()
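# Note (explanatory sketch, not used by the model): rfft2/irfft2 above is the
# real-input shortcut for the full FFT pair; for same-shaped real arrays V, K
#     irfft2(rfft2(V) * rfft2(K), V.shape)
# equals np.real(ifft2(fft2(V) * fft2(K))), the circular (periodic) convolution
# of V with K -- here K is the flipped, ifftshift-ed We/Wi kernel, so Le and Li
# are periodic lateral-interaction terms.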
if __name__ == '__main__':
np.random.seed(137)
model_size = 32
rf_resolution = 64
num_receptors = 16
T = 10.0
tau = 1.0
alpha = 0.1
# Change the folder path!!!
folder = '/home/Local/SOM/Parameters/25Noise/'
dnfsom_activity(model_size, num_receptors, rf_resolution, tau, T, alpha,
folder)
|
gpl-3.0
|
JorgeDeLosSantos/ubim
|
manuscript/images/ch4/code_mpl.py
|
1
|
1168
|
#-*- coding:utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
__all__ = ["img_01",
"img_02",
"img_03",
"img_04",
"img_05",
"img_06",
"img_07"]
def img_01():
x = np.linspace(0,10)
y = np.cos(x)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
plt.savefig("img_01.png")
def img_02():
T = [50, 60, 70, 80, 90, 100, 110, 120]
P = [12, 20, 33, 54, 90, 148, 244, 403]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(T, P)
ax.set_xlabel(u"Temperatura (°C)")
ax.set_ylabel(u"Presión (KPa)")
ax.set_title(u"Relación P-T")
plt.savefig("img_02.png")
def img_03():
x = np.linspace(0,10)
y = np.cos(x)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, lw=2)
ax.plot(x, y+1, lw=4)
ax.plot(x, y+2, linewidth=6)
plt.savefig("img_03c.png")
def img_04():
theta = np.linspace(0,2*np.pi,1000)
r = 0.25*np.cos(3*theta)
fig = plt.figure()
ax = fig.add_subplot(111, projection="polar")
ax.plot(theta, r)
plt.savefig("img_04.png")
def img_05():pass
def img_06():pass
def img_07():pass
def img_08():pass
if __name__=='__main__':
[eval(fun+"()") for fun in __all__]
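    # Equivalent dispatch without eval (suggestion only, same behaviour):
    #     for fun in __all__:
    #         globals()[fun]()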
|
mit
|
anirudhjayaraman/scikit-learn
|
examples/exercises/plot_cv_diabetes.py
|
231
|
2527
|
"""
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
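# Note on the plot above: the dashed lines show, for each alpha, the mean CV
# score +/- np.std(this_scores) / np.sqrt(len(X)), with len(X) = 150 training
# samples here.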
|
bsd-3-clause
|
lyrixderaven/AdventOfCode
|
18.advent.py
|
1
|
1633
|
from inputs import EIGHTEENTH
from pandas import DataFrame
initial = EIGHTEENTH.initial
class grid:
grid = None
def __init__(self, base):
self.grid = DataFrame(index=range(0,100),columns=range(0,100))
self.grid = self.grid.fillna(False)
        for column,line in enumerate(base):
for index,c in enumerate(line):
if c == '#':
self.grid.loc[column,index] = True
self.grid.iloc[0,0] = True
self.grid.iloc[0,-1] = True
self.grid.iloc[-1,-1] = True
self.grid.iloc[-1,0] = True
def iterate(self):
new_grid = DataFrame(index=range(0,100),columns=range(0,100))
new_grid = new_grid.fillna(False)
for col in self.grid.columns:
for idx,val in enumerate(self.grid[col]):
col_left = col - 1 if col > 1 else 0
col_right = col + 1
idx_left = idx - 1 if idx > 1 else 0
idx_right = idx + 1
num_neighbors = self.grid.loc[idx_left:idx_right,col_left:col_right].sum().sum()
if self.grid.iloc[idx,col]:
if num_neighbors in [3,4]:
new_grid.loc[col][idx] = True
else:
if num_neighbors == 3:
new_grid.loc[col][idx] = True
new_grid.iloc[0,0] = True
new_grid.iloc[0,-1] = True
new_grid.iloc[-1,-1] = True
new_grid.iloc[-1,0] = True
self.grid = new_grid
the_grid = grid(initial)
for i in range(1,101):
the_grid.iterate()
print i, the_grid.grid.sum().sum()
|
gpl-2.0
|
code-sauce/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimator.py
|
4
|
53699
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import inspect
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
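# Usage sketch (hypothetical numpy inputs): callers provide either (x, y) or
# input_fn, never both; with arrays the data_feeder path is taken, e.g.
#
#     input_fn, feed_fn = _get_input_fn(
#         x=np.zeros((10, 3)), y=np.zeros(10), input_fn=None, feed_fn=None,
#         batch_size=5, shuffle=True, epochs=1)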
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _get_arguments(func):
"""Returns list of arguments this function has."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func).args
elif hasattr(func, '__call__'):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return _get_arguments(func.func)
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly name given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
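# Illustrative `metrics` dict (hypothetical names/ops) accepted above -- keys are
# friendly names (or (result_name, prediction_name) tuples for multi-head models),
# values are MetricSpec objects or bare metric functions:
#
#     metrics = {
#         'accuracy': metric_spec.MetricSpec(
#             metric_fn=metrics_lib.streaming_accuracy,
#             prediction_key='classes'),
#         'mean_loss': metrics_lib.streaming_mean,            # bare fn, single tensors
#         ('head1_accuracy', 'head1_classes'):
#             metrics_lib.streaming_accuracy,                 # deprecated tuple-key form
#     }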
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use `Estimator`.
"""
__metaclass__ = abc.ABCMeta
  # Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: A RunConfig instance.
"""
# Model directory.
self._model_dir = model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
logging.info('Skipping training since max_steps has already saved.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This can implement
    either iterative training or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at the same time, or when the model is taking a long
    time to converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated_arg_values(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate. '
'input_fn (and in most cases, input_feature_key) will become required '
'args, and use_deprecated_input_fn will default to False and be removed '
'altogether.',
use_deprecated_input_fn=True,
input_fn=None)
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to a
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
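  # Example (illustrative): streaming metrics yield (value_op, update_op) pairs,
  # so an eval_dict like {'accuracy': (acc_value, acc_update)} is split by the
  # method above into value_ops = {'accuracy': acc_value} plus one grouped update op.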
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
eval_dict = self._get_eval_ops(features, labels, metrics).eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=config_pb2.ConfigProto(allow_soft_placement=True))
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
config=config_pb2.ConfigProto(allow_soft_placement=True)))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
all_hooks.extend(hooks)
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=config_pb2.ConfigProto(allow_soft_placement=True)
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
summary_io.SummaryWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
        * `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in `params` parameter. This allows
            configuration of Estimators from hyper parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports next three signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _get_arguments(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) has less than 4 '
'arguments, but not None params (%s) are passed.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _get_arguments(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_ops should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
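  # Minimal model_fn sketch (illustrative only, names hypothetical) accepted by
  # _call_model_fn above -- any of the supported 2/3/4+ argument signatures works:
  #
  #     def my_model_fn(features, labels, mode, params):
  #         logits = layers.fully_connected(features['x'], params['n_classes'],
  #                                         activation_fn=None)
  #         loss = ...       # scalar loss Tensor built from logits and labels
  #         train_op = ...   # e.g. an optimizer's minimize() op
  #         return model_fn_lib.ModelFnOps(mode=mode, predictions=logits,
  #                                        loss=loss, train_op=train_op)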
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
features, labels = self._feature_engineering_fn(features, labels)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
      ValueError: if `serving_input_fn` is not provided.
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
with tf_session.Session('') as session:
variables.initialize_local_variables()
data_flow_ops.tables_initializer()
saver_for_restore = saver.Saver(
variables.global_variables(),
sharded=True)
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
data_flow_ops.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
# For the duration of the x,y deprecation in Estimator, allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
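# --- Illustrative sketch (added for clarity; not part of the original file) ---
# A minimal model_fn compatible with the Estimator above might look like the
# commented example below. It uses contrib-era TensorFlow 1.x APIs; the name
# `_example_model_fn`, the single fully-connected layer and the learning-rate
# param are assumptions for illustration only.
#
#   import tensorflow as tf
#
#   def _example_model_fn(features, labels, mode, params):
#     predictions = tf.contrib.layers.fully_connected(
#         features, 1, activation_fn=None)
#     loss = tf.losses.mean_squared_error(labels, predictions)
#     train_op = tf.train.GradientDescentOptimizer(
#         params.get('learning_rate', 0.1)).minimize(
#             loss, global_step=tf.train.get_global_step())
#     # Legacy 3-tuple; _call_model_fn above wraps it into a ModelFnOps.
#     return predictions, loss, train_op
#
#   est = Estimator(model_fn=_example_model_fn,
#                   params={'learning_rate': 0.05})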
|
apache-2.0
|
mrshu/scikit-learn
|
sklearn/__check_build/__init__.py
|
8
|
1670
|
""" Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
    if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError(
"""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
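# Note added for context (not part of the original file): scikit-learn's top
# level package triggers this check simply by importing the subpackage for its
# side effect, roughly as in the sketch below, so a missing compiled
# ``_check_build`` extension surfaces as the detailed ImportError above.
#
#   from sklearn import __check_build  # noqa: F401  (side-effect import)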
|
bsd-3-clause
|
lmallin/coverage_test
|
python_venv/lib/python2.7/site-packages/pandas/core/computation/align.py
|
7
|
5835
|
"""Core eval alignment algorithms
"""
import warnings
from functools import partial, wraps
from pandas.compat import zip, range
import numpy as np
import pandas as pd
from pandas import compat
from pandas.errors import PerformanceWarning
from pandas.core.common import flatten
from pandas.core.computation.common import _result_type_many
def _align_core_single_unary_op(term):
if isinstance(term.value, np.ndarray):
typ = partial(np.asanyarray, dtype=term.value.dtype)
else:
typ = type(term.value)
ret = typ,
if not hasattr(term.value, 'axes'):
ret += None,
else:
ret += _zip_axes_from_type(typ, term.value.axes),
return ret
def _zip_axes_from_type(typ, new_axes):
axes = {}
for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES):
axes[ax_name] = new_axes[ax_ind]
return axes
def _any_pandas_objects(terms):
"""Check a sequence of terms for instances of PandasObject."""
return any(isinstance(term.value, pd.core.generic.PandasObject)
for term in terms)
def _filter_special_cases(f):
@wraps(f)
def wrapper(terms):
# single unary operand
if len(terms) == 1:
return _align_core_single_unary_op(terms[0])
term_values = (term.value for term in terms)
# we don't have any pandas objects
if not _any_pandas_objects(terms):
return _result_type_many(*term_values), None
return f(terms)
return wrapper
@_filter_special_cases
def _align_core(terms):
term_index = [i for i, term in enumerate(terms)
if hasattr(term.value, 'axes')]
term_dims = [terms[i].value.ndim for i in term_index]
ndims = pd.Series(dict(zip(term_index, term_dims)))
# initial axes are the axes of the largest-axis'd term
biggest = terms[ndims.idxmax()].value
typ = biggest._constructor
axes = biggest.axes
naxes = len(axes)
gt_than_one_axis = naxes > 1
for value in (terms[i].value for i in term_index):
is_series = isinstance(value, pd.Series)
is_series_and_gt_one_axis = is_series and gt_than_one_axis
for axis, items in enumerate(value.axes):
if is_series_and_gt_one_axis:
ax, itm = naxes - 1, value.index
else:
ax, itm = axis, items
if not axes[ax].is_(itm):
axes[ax] = axes[ax].join(itm, how='outer')
for i, ndim in compat.iteritems(ndims):
for axis, items in zip(range(ndim), axes):
ti = terms[i].value
if hasattr(ti, 'reindex_axis'):
transpose = isinstance(ti, pd.Series) and naxes > 1
reindexer = axes[naxes - 1] if transpose else items
term_axis_size = len(ti.axes[axis])
reindexer_size = len(reindexer)
ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))
if ordm >= 1 and reindexer_size >= 10000:
warnings.warn('Alignment difference on axis {0} is larger '
'than an order of magnitude on term {1!r}, '
'by more than {2:.4g}; performance may '
'suffer'.format(axis, terms[i].name, ordm),
category=PerformanceWarning,
stacklevel=6)
if transpose:
f = partial(ti.reindex, index=reindexer, copy=False)
else:
f = partial(ti.reindex_axis, reindexer, axis=axis,
copy=False)
terms[i].update(f())
terms[i].update(terms[i].value.values)
return typ, _zip_axes_from_type(typ, axes)
def _align(terms):
"""Align a set of terms"""
try:
# flatten the parse tree (a nested list, really)
terms = list(flatten(terms))
except TypeError:
# can't iterate so it must just be a constant or single variable
if isinstance(terms.value, pd.core.generic.NDFrame):
typ = type(terms.value)
return typ, _zip_axes_from_type(typ, terms.value.axes)
return np.result_type(terms.type), None
# if all resolved variables are numeric scalars
if all(term.isscalar for term in terms):
return _result_type_many(*(term.value for term in terms)).type, None
# perform the main alignment
typ, axes = _align_core(terms)
return typ, axes
def _reconstruct_object(typ, obj, axes, dtype):
"""Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
    axes : dict
        The axes to use to construct the resulting pandas object
    dtype : numpy dtype
        A dtype combined with ``obj.dtype`` via ``np.result_type`` to choose
        the dtype of the reconstructed value
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`.
"""
try:
typ = typ.type
except AttributeError:
pass
res_t = np.result_type(obj.dtype, dtype)
if (not isinstance(typ, partial) and
issubclass(typ, pd.core.generic.PandasObject)):
return typ(obj, dtype=res_t, **axes)
# special case for pathological things like ~True/~False
if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
# The condition is to distinguish 0-dim array (returned in case of
# scalar) and 1 element array
# e.g. np.array(0) and np.array([0])
if len(obj.shape) == 1 and len(obj) == 1:
if not isinstance(ret_value, np.ndarray):
ret_value = np.array([ret_value]).astype(res_t)
return ret_value
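# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The alignment above is what lets ``pandas.eval`` combine objects whose axes
# differ: indexes are joined with how='outer' and each term is reindexed before
# evaluation. The tiny frames below are made up for the example.
#
#   import pandas as pd
#   a = pd.DataFrame({'x': [1.0, 2.0]}, index=[0, 1])
#   b = pd.DataFrame({'x': [10.0, 20.0]}, index=[1, 2])
#   pd.eval('a + b')   # rows 0 and 2 exist in only one operand -> NaN,
#                      # mirroring the outer join performed in _align_core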
|
mit
|
Eric89GXL/mne-python
|
mne/tests/test_cov.py
|
6
|
33641
|
# Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import itertools as itt
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
import pytest
import numpy as np
from scipy import linalg
from mne.cov import (regularize, whiten_evoked,
_auto_low_rank_model,
prepare_noise_cov, compute_whitener,
_regularized_covariance)
from mne import (read_cov, write_cov, Epochs, merge_events,
find_events, compute_raw_covariance,
compute_covariance, read_evokeds, compute_proj_raw,
pick_channels_cov, pick_types, make_ad_hoc_cov,
make_fixed_length_events, create_info)
from mne.channels import equalize_channels
from mne.datasets import testing
from mne.fixes import _get_args
from mne.io import read_raw_fif, RawArray, read_raw_ctf
from mne.io.pick import _DATA_CH_TYPES_SPLIT
from mne.preprocessing import maxwell_filter
from mne.rank import _compute_rank_int
from mne.utils import (requires_sklearn, run_tests_if_main,
catch_logging, assert_snr)
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
cov_fname = op.join(base_dir, 'test-cov.fif')
cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')
cov_km_fname = op.join(base_dir, 'test-km-cov.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
ave_fname = op.join(base_dir, 'test-ave.fif')
erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
ctf_fname = op.join(testing.data_path(download=False), 'CTF',
'testdata_ctf.ds')
@pytest.mark.parametrize('proj', (True, False))
@pytest.mark.parametrize('pca', (True, 'white', False))
def test_compute_whitener(proj, pca):
"""Test properties of compute_whitener."""
raw = read_raw_fif(raw_fname).crop(0, 3).load_data()
raw.pick_types(meg=True, eeg=True, exclude=())
if proj:
raw.apply_proj()
else:
raw.del_proj()
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_raw_covariance(raw)
W, _, C = compute_whitener(cov, raw.info, pca=pca, return_colorer=True,
verbose='error')
n_channels = len(raw.ch_names)
n_reduced = len(raw.ch_names)
rank = n_channels - len(raw.info['projs'])
n_reduced = rank if pca is True else n_channels
assert W.shape == C.shape[::-1] == (n_reduced, n_channels)
# round-trip mults
round_trip = np.dot(W, C)
if pca is True:
assert_allclose(round_trip, np.eye(n_reduced), atol=1e-7)
elif pca == 'white':
# Our first few rows/cols are zeroed out in the white space
assert_allclose(round_trip[-rank:, -rank:],
np.eye(rank), atol=1e-7)
else:
assert pca is False
assert_allclose(round_trip, np.eye(n_channels), atol=0.05)
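# Note added for clarity (not part of the original tests): the round-trip checks
# above rely on the defining property of a whitener, namely that for a noise
# covariance Sigma the matrix W returned by compute_whitener satisfies
# W @ Sigma @ W.T ~= I (up to rank deficiency), with C acting as the matching
# "colorer" so that W @ C is a possibly rank-reduced identity.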
def test_cov_mismatch():
"""Test estimation with MEG<->Head mismatch."""
raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
events = find_events(raw, stim_channel='STI 014')
raw.pick_channels(raw.ch_names[:5])
raw.add_proj([], remove_existing=True)
epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)
for kind in ('shift', 'None'):
epochs_2 = epochs.copy()
# This should be fine
compute_covariance([epochs, epochs_2])
if kind == 'shift':
epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001
else: # None
epochs_2.info['dev_head_t'] = None
pytest.raises(ValueError, compute_covariance, [epochs, epochs_2])
compute_covariance([epochs, epochs_2], on_mismatch='ignore')
        with pytest.warns(RuntimeWarning, match='transform mismatch'):
compute_covariance([epochs, epochs_2], on_mismatch='warn')
pytest.raises(ValueError, compute_covariance, epochs,
on_mismatch='x')
# This should work
epochs.info['dev_head_t'] = None
epochs_2.info['dev_head_t'] = None
compute_covariance([epochs, epochs_2], method=None)
def test_cov_order():
"""Test covariance ordering."""
raw = read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True)
info = raw.info
# add MEG channel with low enough index number to affect EEG if
# order is incorrect
info['bads'] += ['MEG 0113']
ch_names = [info['ch_names'][pick]
for pick in pick_types(info, meg=False, eeg=True)]
cov = read_cov(cov_fname)
# no avg ref present warning
prepare_noise_cov(cov, info, ch_names, verbose='error')
# big reordering
cov_reorder = cov.copy()
order = np.random.RandomState(0).permutation(np.arange(len(cov.ch_names)))
cov_reorder['names'] = [cov['names'][ii] for ii in order]
cov_reorder['data'] = cov['data'][order][:, order]
# Make sure we did this properly
_assert_reorder(cov_reorder, cov, order)
# Now check some functions that should get the same result for both
# regularize
with pytest.raises(ValueError, match='rank, if str'):
regularize(cov, info, rank='foo')
with pytest.raises(TypeError, match='rank must be'):
regularize(cov, info, rank=False)
with pytest.raises(TypeError, match='rank must be'):
regularize(cov, info, rank=1.)
cov_reg = regularize(cov, info, rank='full')
cov_reg_reorder = regularize(cov_reorder, info, rank='full')
_assert_reorder(cov_reg_reorder, cov_reg, order)
# prepare_noise_cov
cov_prep = prepare_noise_cov(cov, info, ch_names)
cov_prep_reorder = prepare_noise_cov(cov, info, ch_names)
_assert_reorder(cov_prep, cov_prep_reorder,
order=np.arange(len(cov_prep['names'])))
# compute_whitener
whitener, w_ch_names, n_nzero = compute_whitener(
cov, info, return_rank=True)
assert whitener.shape[0] == whitener.shape[1]
whitener_2, w_ch_names_2, n_nzero_2 = compute_whitener(
cov_reorder, info, return_rank=True)
assert_array_equal(w_ch_names_2, w_ch_names)
assert_allclose(whitener_2, whitener, rtol=1e-6)
assert n_nzero == n_nzero_2
# with pca
assert n_nzero < whitener.shape[0]
whitener_pca, w_ch_names_pca, n_nzero_pca = compute_whitener(
cov, info, pca=True, return_rank=True)
assert_array_equal(w_ch_names_pca, w_ch_names)
assert n_nzero_pca == n_nzero
assert whitener_pca.shape == (n_nzero_pca, len(w_ch_names))
# whiten_evoked
evoked = read_evokeds(ave_fname)[0]
evoked_white = whiten_evoked(evoked, cov)
evoked_white_2 = whiten_evoked(evoked, cov_reorder)
assert_allclose(evoked_white_2.data, evoked_white.data, atol=1e-7)
def _assert_reorder(cov_new, cov_orig, order):
"""Check that we get the same result under reordering."""
inv_order = np.argsort(order)
assert_array_equal([cov_new['names'][ii] for ii in inv_order],
cov_orig['names'])
assert_allclose(cov_new['data'][inv_order][:, inv_order],
cov_orig['data'], atol=1e-20)
def test_ad_hoc_cov(tmpdir):
"""Test ad hoc cov creation and I/O."""
out_fname = tmpdir.join('test-cov.fif')
evoked = read_evokeds(ave_fname)[0]
cov = make_ad_hoc_cov(evoked.info)
cov.save(out_fname)
assert 'Covariance' in repr(cov)
cov2 = read_cov(out_fname)
assert_array_almost_equal(cov['data'], cov2['data'])
std = dict(grad=2e-13, mag=10e-15, eeg=0.1e-6)
cov = make_ad_hoc_cov(evoked.info, std)
cov.save(out_fname)
assert 'Covariance' in repr(cov)
cov2 = read_cov(out_fname)
assert_array_almost_equal(cov['data'], cov2['data'])
cov['data'] = np.diag(cov['data'])
with pytest.raises(RuntimeError, match='attributes inconsistent'):
cov._get_square()
cov['diag'] = False
cov._get_square()
cov['data'] = np.diag(cov['data'])
with pytest.raises(RuntimeError, match='attributes inconsistent'):
cov._get_square()
def test_io_cov(tmpdir):
"""Test IO for noise covariance matrices."""
cov = read_cov(cov_fname)
cov['method'] = 'empirical'
cov['loglik'] = -np.inf
cov.save(tmpdir.join('test-cov.fif'))
cov2 = read_cov(tmpdir.join('test-cov.fif'))
assert_array_almost_equal(cov.data, cov2.data)
assert_equal(cov['method'], cov2['method'])
assert_equal(cov['loglik'], cov2['loglik'])
assert 'Covariance' in repr(cov)
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(tmpdir.join('test-cov.fif.gz'))
cov2 = read_cov(tmpdir.join('test-cov.fif.gz'))
assert_array_almost_equal(cov.data, cov2.data)
cov['bads'] = ['EEG 039']
cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
assert cov_sel['dim'] == (len(cov['data']) - len(cov['bads']))
assert cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim'])
cov_sel.save(tmpdir.join('test-cov.fif'))
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(tmpdir.join('test-cov.fif.gz'))
cov2 = read_cov(tmpdir.join('test-cov.fif.gz'))
assert_array_almost_equal(cov.data, cov2.data)
# test warnings on bad filenames
cov_badname = tmpdir.join('test-bad-name.fif.gz')
with pytest.warns(RuntimeWarning, match='-cov.fif'):
write_cov(cov_badname, cov)
with pytest.warns(RuntimeWarning, match='-cov.fif'):
read_cov(cov_badname)
@pytest.mark.parametrize('method', (None, 'empirical', 'shrunk'))
def test_cov_estimation_on_raw(method, tmpdir):
"""Test estimation from raw (typically empty room)."""
if method == 'shrunk':
try:
import sklearn # noqa: F401
except Exception as exp:
pytest.skip('sklearn is required, got %s' % (exp,))
raw = read_raw_fif(raw_fname, preload=True)
cov_mne = read_cov(erm_cov_fname)
method_params = dict(shrunk=dict(shrinkage=[0]))
    # The pure-string method uses the more efficient numpy-based code path;
    # the list gets triaged to compute_covariance (should be equivalent,
    # but uses more memory)
with pytest.warns(None): # can warn about EEG ref
cov = compute_raw_covariance(
raw, tstep=None, method=method, rank='full',
method_params=method_params)
assert_equal(cov.ch_names, cov_mne.ch_names)
assert_equal(cov.nfree, cov_mne.nfree)
assert_snr(cov.data, cov_mne.data, 1e6)
# test equivalence with np.cov
cov_np = np.cov(raw.copy().pick_channels(cov['names']).get_data(), ddof=1)
if method != 'shrunk': # can check all
off_diag = np.triu_indices(cov_np.shape[0])
else:
# We explicitly zero out off-diag entries between channel types,
# so let's just check MEG off-diag entries
off_diag = np.triu_indices(len(pick_types(raw.info, meg=True,
exclude=())))
for other in (cov_mne, cov):
assert_allclose(np.diag(cov_np), np.diag(other.data), rtol=5e-6)
assert_allclose(cov_np[off_diag], other.data[off_diag], rtol=4e-3)
assert_snr(cov.data, other.data, 1e6)
# tstep=0.2 (default)
with pytest.warns(None): # can warn about EEG ref
cov = compute_raw_covariance(raw, method=method, rank='full',
method_params=method_params)
assert_equal(cov.nfree, cov_mne.nfree - 120) # cutoff some samples
assert_snr(cov.data, cov_mne.data, 170)
# test IO when computation done in Python
cov.save(tmpdir.join('test-cov.fif')) # test saving
cov_read = read_cov(tmpdir.join('test-cov.fif'))
assert cov_read.ch_names == cov.ch_names
assert cov_read.nfree == cov.nfree
assert_array_almost_equal(cov.data, cov_read.data)
# test with a subset of channels
raw_pick = raw.copy().pick_channels(raw.ch_names[:5])
raw_pick.info.normalize_proj()
cov = compute_raw_covariance(raw_pick, tstep=None, method=method,
rank='full', method_params=method_params)
assert cov_mne.ch_names[:5] == cov.ch_names
assert_snr(cov.data, cov_mne.data[:5, :5], 5e6)
cov = compute_raw_covariance(raw_pick, method=method, rank='full',
method_params=method_params)
assert_snr(cov.data, cov_mne.data[:5, :5], 90) # cutoff samps
# make sure we get a warning with too short a segment
raw_2 = read_raw_fif(raw_fname).crop(0, 1)
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_raw_covariance(raw_2, method=method,
method_params=method_params)
# no epochs found due to rejection
pytest.raises(ValueError, compute_raw_covariance, raw, tstep=None,
method='empirical', reject=dict(eog=200e-6))
# but this should work
with pytest.warns(None): # sklearn
cov = compute_raw_covariance(
raw.copy().crop(0, 10.), tstep=None, method=method,
reject=dict(eog=1000e-6), method_params=method_params,
verbose='error')
@pytest.mark.slowtest
@requires_sklearn
def test_cov_estimation_on_raw_reg():
"""Test estimation from raw with regularization."""
raw = read_raw_fif(raw_fname, preload=True)
raw.info['sfreq'] /= 10.
raw = RawArray(raw._data[:, ::10].copy(), raw.info) # decimate for speed
cov_mne = read_cov(erm_cov_fname)
with pytest.warns(RuntimeWarning, match='Too few samples'):
# XXX don't use "shrunk" here, for some reason it makes Travis 2.7
# hang... "diagonal_fixed" is much faster. Use long epochs for speed.
cov = compute_raw_covariance(raw, tstep=5., method='diagonal_fixed')
assert_snr(cov.data, cov_mne.data, 5)
def _assert_cov(cov, cov_desired, tol=0.005, nfree=True):
assert_equal(cov.ch_names, cov_desired.ch_names)
err = (linalg.norm(cov.data - cov_desired.data, ord='fro') /
linalg.norm(cov.data, ord='fro'))
assert err < tol, '%s >= %s' % (err, tol)
if nfree:
assert_equal(cov.nfree, cov_desired.nfree)
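# Note added for clarity (not part of the original tests): the helper above
# compares covariances via the relative Frobenius-norm error
# ||C - C_desired||_F / ||C||_F and only additionally checks the degrees of
# freedom when ``nfree=True``.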
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None))
def test_cov_estimation_with_triggers(rank, tmpdir):
"""Test estimation from raw with triggers."""
raw = read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True).load_data()
events = find_events(raw, stim_channel='STI 014')
event_ids = [1, 2, 3, 4]
reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
# cov with merged events and keep_sample_mean=True
events_merged = merge_events(events, event_ids, 1234)
epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True,
reject=reject, preload=True)
cov = compute_covariance(epochs, keep_sample_mean=True)
cov_km = read_cov(cov_km_fname)
# adjust for nfree bug
cov_km['nfree'] -= 1
_assert_cov(cov, cov_km)
# Test with tmin and tmax (different but not too much)
cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
assert np.all(cov.data != cov_tmin_tmax.data)
err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
linalg.norm(cov_tmin_tmax.data, ord='fro'))
assert err < 0.05
# cov using a list of epochs and keep_sample_mean=True
epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True, reject=reject)
for ev_id in event_ids]
cov2 = compute_covariance(epochs, keep_sample_mean=True)
assert_array_almost_equal(cov.data, cov2.data)
assert cov.ch_names == cov2.ch_names
# cov with keep_sample_mean=False using a list of epochs
cov = compute_covariance(epochs, keep_sample_mean=False)
assert cov_km.nfree == cov.nfree
_assert_cov(cov, read_cov(cov_fname), nfree=False)
method_params = {'empirical': {'assume_centered': False}}
pytest.raises(ValueError, compute_covariance, epochs,
keep_sample_mean=False, method_params=method_params)
pytest.raises(ValueError, compute_covariance, epochs,
keep_sample_mean=False, method='shrunk', rank=rank)
# test IO when computation done in Python
cov.save(tmpdir.join('test-cov.fif')) # test saving
cov_read = read_cov(tmpdir.join('test-cov.fif'))
_assert_cov(cov, cov_read, 1e-5)
# cov with list of epochs with different projectors
epochs = [Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True),
Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=False)]
# these should fail
pytest.raises(ValueError, compute_covariance, epochs)
pytest.raises(ValueError, compute_covariance, epochs, projs=None)
# these should work, but won't be equal to above
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_covariance(epochs, projs=[])
# test new dict support
epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01, tmax=0,
proj=True, reject=reject, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
compute_covariance(epochs)
with pytest.warns(RuntimeWarning, match='Too few samples'):
compute_covariance(epochs, projs=[])
pytest.raises(TypeError, compute_covariance, epochs, projs='foo')
pytest.raises(TypeError, compute_covariance, epochs, projs=['foo'])
def test_arithmetic_cov():
"""Test arithmetic with noise covariance matrices."""
cov = read_cov(cov_fname)
cov_sum = cov + cov
assert_array_almost_equal(2 * cov.nfree, cov_sum.nfree)
assert_array_almost_equal(2 * cov.data, cov_sum.data)
assert cov.ch_names == cov_sum.ch_names
cov += cov
assert_array_almost_equal(cov_sum.nfree, cov.nfree)
assert_array_almost_equal(cov_sum.data, cov.data)
assert cov_sum.ch_names == cov.ch_names
def test_regularize_cov():
"""Test cov regularization."""
raw = read_raw_fif(raw_fname)
raw.info['bads'].append(raw.ch_names[0]) # test with bad channels
noise_cov = read_cov(cov_fname)
# Regularize noise cov
reg_noise_cov = regularize(noise_cov, raw.info,
mag=0.1, grad=0.1, eeg=0.1, proj=True,
exclude='bads', rank='full')
assert noise_cov['dim'] == reg_noise_cov['dim']
assert noise_cov['data'].shape == reg_noise_cov['data'].shape
assert np.mean(noise_cov['data'] < reg_noise_cov['data']) < 0.08
# make sure all args are represented
assert set(_DATA_CH_TYPES_SPLIT) - set(_get_args(regularize)) == set()
def test_whiten_evoked():
"""Test whitening of evoked data."""
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=True)
cov = read_cov(cov_fname)
###########################################################################
# Show result
picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1,
exclude='bads', rank='full')
evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)
whiten_baseline_data = evoked_white.data[picks][:, evoked.times < 0]
mean_baseline = np.mean(np.abs(whiten_baseline_data), axis=1)
assert np.all(mean_baseline < 1.)
assert np.all(mean_baseline > 0.2)
# degenerate
cov_bad = pick_channels_cov(cov, include=evoked.ch_names[:10])
pytest.raises(RuntimeError, whiten_evoked, evoked, cov_bad, picks)
def test_regularized_covariance():
"""Test unchanged data with regularized_covariance."""
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=True)
data = evoked.data.copy()
# check that input data remain unchanged. gh-5698
_regularized_covariance(data)
assert_allclose(data, evoked.data, atol=1e-20)
@requires_sklearn
def test_auto_low_rank():
"""Test probabilistic low rank estimators."""
n_samples, n_features, rank = 400, 10, 5
sigma = 0.1
def get_data(n_samples, n_features, rank, sigma):
rng = np.random.RandomState(42)
W = rng.randn(n_features, n_features)
X = rng.randn(n_samples, rank)
U, _, _ = linalg.svd(W.copy())
X = np.dot(X, U[:, :rank].T)
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X += rng.randn(n_samples, n_features) * sigmas
return X
X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
sigma=sigma)
method_params = {'iter_n_components': [4, 5, 6]}
cv = 3
n_jobs = 1
mode = 'factor_analysis'
rescale = 1e8
X *= rescale
est, info = _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
method_params=method_params,
cv=cv)
assert_equal(info['best'], rank)
X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
sigma=sigma)
method_params = {'iter_n_components': [n_features + 5]}
msg = ('You are trying to estimate %i components on matrix '
'with %i features.') % (n_features + 5, n_features)
with pytest.warns(RuntimeWarning, match=msg):
_auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
method_params=method_params, cv=cv)
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None, 'info'))
@requires_sklearn
def test_compute_covariance_auto_reg(rank):
"""Test automated regularization."""
raw = read_raw_fif(raw_fname, preload=True)
raw.resample(100, npad='auto') # much faster estimation
events = find_events(raw, stim_channel='STI 014')
event_ids = [1, 2, 3, 4]
reject = dict(mag=4e-12)
# cov with merged events and keep_sample_mean=True
events_merged = merge_events(events, event_ids, 1234)
# we need a few channels for numerical reasons in PCA/FA
picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
raw.pick_channels([raw.ch_names[pick] for pick in picks])
raw.info.normalize_proj()
epochs = Epochs(
raw, events_merged, 1234, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
epochs = epochs.crop(None, 0)[:5]
method_params = dict(factor_analysis=dict(iter_n_components=[3]),
pca=dict(iter_n_components=[3]))
covs = compute_covariance(epochs, method='auto',
method_params=method_params,
return_estimators=True, rank=rank)
    # make sure regularization produces structured differences
diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
off_diag_mask = np.invert(diag_mask)
for cov_a, cov_b in itt.combinations(covs, 2):
if (cov_a['method'] == 'diagonal_fixed' and
                # here we have diagonal or no regularization.
cov_b['method'] == 'empirical' and rank == 'full'):
assert not np.any(cov_a['data'][diag_mask] ==
cov_b['data'][diag_mask])
# but the rest is the same
assert_allclose(cov_a['data'][off_diag_mask],
cov_b['data'][off_diag_mask], rtol=1e-12)
else:
# and here we have shrinkage everywhere.
            assert not np.any(cov_a['data'][diag_mask] ==
                              cov_b['data'][diag_mask])
            assert not np.any(cov_a['data'][off_diag_mask] ==
                              cov_b['data'][off_diag_mask])
logliks = [c['loglik'] for c in covs]
assert np.diff(logliks).max() <= 0 # descending order
methods = ['empirical', 'ledoit_wolf', 'oas', 'shrunk', 'shrinkage']
if rank == 'full':
methods.extend(['factor_analysis', 'pca'])
with catch_logging() as log:
cov3 = compute_covariance(epochs, method=methods,
method_params=method_params, projs=None,
return_estimators=True, rank=rank,
verbose=True)
log = log.getvalue().split('\n')
if rank is None:
assert ' Setting small MAG eigenvalues to zero (without PCA)' in log
assert 'Reducing data rank from 10 -> 7' in log
else:
assert 'Reducing' not in log
method_names = [cov['method'] for cov in cov3]
best_bounds = [-45, -35]
bounds = [-55, -45] if rank == 'full' else best_bounds
for method in set(methods) - {'empirical', 'shrunk'}:
this_lik = cov3[method_names.index(method)]['loglik']
assert bounds[0] < this_lik < bounds[1]
this_lik = cov3[method_names.index('shrunk')]['loglik']
assert best_bounds[0] < this_lik < best_bounds[1]
this_lik = cov3[method_names.index('empirical')]['loglik']
bounds = [-110, -100] if rank == 'full' else best_bounds
assert bounds[0] < this_lik < bounds[1]
assert_equal({c['method'] for c in cov3}, set(methods))
cov4 = compute_covariance(epochs, method=methods,
method_params=method_params, projs=None,
return_estimators=False, rank=rank)
assert cov3[0]['method'] == cov4['method'] # ordering
# invalid prespecified method
pytest.raises(ValueError, compute_covariance, epochs, method='pizza')
# invalid scalings
pytest.raises(ValueError, compute_covariance, epochs, method='shrunk',
scalings=dict(misc=123))
def _cov_rank(cov, info, proj=True):
# ignore warnings about rank mismatches: sometimes we will intentionally
# violate the computed/info assumption, such as when using SSS with
# `rank='full'`
with pytest.warns(None):
return _compute_rank_int(cov, info=info, proj=proj)
@pytest.fixture(scope='module')
def raw_epochs_events():
"""Create raw, epochs, and events for tests."""
raw = read_raw_fif(raw_fname).set_eeg_reference(projection=True).crop(0, 3)
raw = maxwell_filter(raw, regularize=None) # heavily reduce the rank
assert raw.info['bads'] == [] # no bads
events = make_fixed_length_events(raw)
epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
return (raw, epochs, events)
@requires_sklearn
@pytest.mark.parametrize('rank', (None, 'full', 'info'))
def test_low_rank_methods(rank, raw_epochs_events):
"""Test low-rank covariance matrix estimation."""
epochs = raw_epochs_events[1]
sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj
n_ch = 366
methods = ('empirical', 'diagonal_fixed', 'oas')
bounds = {
'None': dict(empirical=(-15000, -5000),
diagonal_fixed=(-1500, -500),
oas=(-700, -600)),
'full': dict(empirical=(-18000, -8000),
diagonal_fixed=(-2000, -1600),
oas=(-1600, -1000)),
'info': dict(empirical=(-15000, -5000),
diagonal_fixed=(-700, -600),
oas=(-700, -600)),
}
with pytest.warns(RuntimeWarning, match='Too few samples'):
covs = compute_covariance(
epochs, method=methods, return_estimators=True, rank=rank,
verbose=True)
for cov in covs:
method = cov['method']
these_bounds = bounds[str(rank)][method]
this_rank = _cov_rank(cov, epochs.info, proj=(rank != 'full'))
if rank == 'full' and method != 'empirical':
assert this_rank == n_ch
else:
assert this_rank == sss_proj_rank
assert these_bounds[0] < cov['loglik'] < these_bounds[1], \
(rank, method)
@requires_sklearn
def test_low_rank_cov(raw_epochs_events):
"""Test additional properties of low rank computations."""
raw, epochs, events = raw_epochs_events
sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj
n_ch = 366
proj_rank = 365 # one EEG proj
with pytest.warns(RuntimeWarning, match='Too few samples'):
emp_cov = compute_covariance(epochs)
# Test equivalence with mne.cov.regularize subspace
with pytest.raises(ValueError, match='are dependent.*must equal'):
regularize(emp_cov, epochs.info, rank=None, mag=0.1, grad=0.2)
assert _cov_rank(emp_cov, epochs.info) == sss_proj_rank
reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
assert _cov_rank(reg_cov, epochs.info) == proj_rank
with pytest.warns(RuntimeWarning, match='exceeds the theoretical'):
_compute_rank_int(reg_cov, info=epochs.info)
del reg_cov
with catch_logging() as log:
reg_r_cov = regularize(emp_cov, epochs.info, proj=True, rank=None,
verbose=True)
log = log.getvalue()
assert 'jointly' in log
assert _cov_rank(reg_r_cov, epochs.info) == sss_proj_rank
reg_r_only_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
assert _cov_rank(reg_r_only_cov, epochs.info) == sss_proj_rank
assert_allclose(reg_r_only_cov['data'], reg_r_cov['data'])
del reg_r_only_cov, reg_r_cov
# test that rank=306 is same as rank='full'
epochs_meg = epochs.copy().pick_types(meg=True)
assert len(epochs_meg.ch_names) == 306
epochs_meg.info.update(bads=[], projs=[])
cov_full = compute_covariance(epochs_meg, method='oas',
rank='full', verbose='error')
assert _cov_rank(cov_full, epochs_meg.info) == 306
with pytest.warns(RuntimeWarning, match='few samples'):
cov_dict = compute_covariance(epochs_meg, method='oas',
rank=dict(meg=306))
assert _cov_rank(cov_dict, epochs_meg.info) == 306
assert_allclose(cov_full['data'], cov_dict['data'])
cov_dict = compute_covariance(epochs_meg, method='oas',
rank=dict(meg=306), verbose='error')
assert _cov_rank(cov_dict, epochs_meg.info) == 306
assert_allclose(cov_full['data'], cov_dict['data'])
# Work with just EEG data to simplify projection / rank reduction
raw = raw.copy().pick_types(meg=False, eeg=True)
n_proj = 2
raw.add_proj(compute_proj_raw(raw, n_eeg=n_proj))
n_ch = len(raw.ch_names)
rank = n_ch - n_proj - 1 # plus avg proj
assert len(raw.info['projs']) == 3
epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
assert len(raw.ch_names) == n_ch
emp_cov = compute_covariance(epochs, rank='full', verbose='error')
assert _cov_rank(emp_cov, epochs.info) == rank
reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
assert _cov_rank(reg_cov, epochs.info) == rank
reg_r_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
assert _cov_rank(reg_r_cov, epochs.info) == rank
dia_cov = compute_covariance(epochs, rank=None, method='diagonal_fixed',
verbose='error')
assert _cov_rank(dia_cov, epochs.info) == rank
assert_allclose(dia_cov['data'], reg_cov['data'])
epochs.pick_channels(epochs.ch_names[:103])
# degenerate
with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
compute_covariance(epochs, rank=None, method='pca')
with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
compute_covariance(epochs, rank=None, method='factor_analysis')
@testing.requires_testing_data
@requires_sklearn
def test_cov_ctf():
"""Test basic cov computation on ctf data with/without compensation."""
raw = read_raw_ctf(ctf_fname).crop(0., 2.).load_data()
events = make_fixed_length_events(raw, 99999)
assert len(events) == 2
ch_names = [raw.info['ch_names'][pick]
for pick in pick_types(raw.info, meg=True, eeg=False,
ref_meg=False)]
for comp in [0, 1]:
raw.apply_gradient_compensation(comp)
epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
noise_cov = compute_covariance(epochs, tmax=0.,
method=['empirical'])
prepare_noise_cov(noise_cov, raw.info, ch_names)
raw.apply_gradient_compensation(0)
epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
noise_cov = compute_covariance(epochs, tmax=0., method=['empirical'])
raw.apply_gradient_compensation(1)
# TODO This next call in principle should fail.
prepare_noise_cov(noise_cov, raw.info, ch_names)
    # make sure comps matrices were not removed from raw
assert raw.info['comps'], 'Comps matrices removed'
def test_equalize_channels():
"""Test equalization of channels for instances of Covariance."""
cov1 = make_ad_hoc_cov(create_info(['CH1', 'CH2', 'CH3', 'CH4'], sfreq=1.0,
ch_types='eeg'))
cov2 = make_ad_hoc_cov(create_info(['CH5', 'CH1', 'CH2'], sfreq=1.0,
ch_types='eeg'))
cov1, cov2 = equalize_channels([cov1, cov2])
assert cov1.ch_names == ['CH1', 'CH2']
assert cov2.ch_names == ['CH1', 'CH2']
run_tests_if_main()
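# Usage note (added; not part of the original file): with the MNE testing data
# available, individual tests in this module can be run directly with pytest,
# for example
#
#   pytest mne/tests/test_cov.py -k test_regularize_cov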
|
bsd-3-clause
|
olakiril/pipeline
|
python/pipeline/reso.py
|
1
|
79661
|
""" Schemas for resonant scanners."""
import datajoint as dj
from datajoint.jobs import key_hash
import matplotlib.pyplot as plt
import numpy as np
import scanreader
from . import experiment, notify, shared
from .utils import galvo_corrections, signal, quality, mask_classification, performance
from .exceptions import PipelineException
schema = dj.schema('pipeline_reso', locals(), create_tables=False)
CURRENT_VERSION = 1
@schema
class Version(dj.Manual):
definition = """ # versions for the reso pipeline
-> shared.PipelineVersion
---
description = '' : varchar(256) # any notes on this version
date = CURRENT_TIMESTAMP : timestamp # automatic
"""
@schema
class ScanInfo(dj.Imported):
definition = """ # master table with general data about the scans
-> experiment.Scan
-> Version # reso version
---
nfields : tinyint # number of fields
nchannels : tinyint # number of channels
nframes : int # number of recorded frames
nframes_requested : int # number of requested frames (from header)
px_height : smallint # lines per frame
px_width : smallint # pixels per line
um_height : float # height in microns
um_width : float # width in microns
x : float # (um) center of scan in the motor coordinate system
y : float # (um) center of scan in the motor coordinate system
fps : float # (Hz) frames per second
zoom : decimal(5,2) # zoom factor
bidirectional : boolean # true = bidirectional scanning
usecs_per_line : float # microseconds per scan line
fill_fraction : float # raster scan temporal fill fraction (see scanimage)
    valid_depth=false : boolean # whether depth has been manually checked
"""
@property
def key_source(self):
rigs = [{'rig': '2P2'}, {'rig': '2P3'}, {'rig': '2P5'}, {'rig': '3P1'}]
reso_scans = experiment.Scan() & (experiment.Session() & rigs)
return reso_scans * (Version() & {'pipe_version': CURRENT_VERSION})
class Field(dj.Part):
definition = """ # field-specific information
-> ScanInfo
-> shared.Field
---
z : float # (um) absolute depth with respect to the surface of the cortex
delay_image : longblob # (ms) delay between the start of the scan and pixels in this field
"""
def make(self, key):
""" Read some scan parameters and compute FOV in microns."""
from decimal import Decimal
# Read the scan
print('Reading header...')
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Get attributes
tuple_ = key.copy() # in case key is reused somewhere else
tuple_['nfields'] = scan.num_fields
tuple_['nchannels'] = scan.num_channels
tuple_['nframes'] = scan.num_frames
tuple_['nframes_requested'] = scan.num_requested_frames
tuple_['px_height'] = scan.image_height
tuple_['px_width'] = scan.image_width
tuple_['x'] = scan.motor_position_at_zero[0]
tuple_['y'] = scan.motor_position_at_zero[1]
tuple_['fps'] = scan.fps
tuple_['zoom'] = Decimal(str(scan.zoom))
tuple_['bidirectional'] = scan.is_bidirectional
tuple_['usecs_per_line'] = scan.seconds_per_line * 1e6
tuple_['fill_fraction'] = scan.temporal_fill_fraction
tuple_['valid_depth'] = True
# Estimate height and width in microns using measured FOVs for similar setups
fov_rel = (experiment.FOV() * experiment.Session() * experiment.Scan() & key
& 'session_date>=fov_ts')
zooms = fov_rel.fetch('mag').astype(np.float32) # zooms measured in same setup
closest_zoom = zooms[np.argmin(np.abs(np.log(zooms / scan.zoom)))]
dims = (fov_rel & 'ABS(mag - {}) < 1e-4'.format(closest_zoom)).fetch1('height', 'width')
um_height, um_width = [float(um) * (closest_zoom / scan.zoom) for um in dims]
tuple_['um_height'] = um_height * scan._y_angle_scale_factor
tuple_['um_width'] = um_width * scan._x_angle_scale_factor
# Insert in ScanInfo
self.insert1(tuple_)
# Compute field depths with respect to surface
surf_z = (experiment.Scan() & key).fetch1('depth') # surface depth in motor coordinates
motor_zero = surf_z - scan.motor_position_at_zero[2]
if scan.is_slow_stack and not scan.is_slow_stack_with_fastZ: # using motor
# Correct for motor and fastZ pointing in different directions
initial_fastZ = scan.initial_secondary_z or 0
rel_field_depths = 2 * initial_fastZ - np.array(scan.field_depths)
else: # using fastZ
rel_field_depths = np.array(scan.field_depths)
field_depths = motor_zero + rel_field_depths
# Insert field information
for field_id, (field_z, field_offsets) in enumerate(zip(field_depths, scan.field_offsets)):
ScanInfo.Field().insert1({**key, 'field': field_id + 1, 'z': field_z,
'delay_image': field_offsets})
# Fill in CorrectionChannel if only one channel
if scan.num_channels == 1:
CorrectionChannel().fill(key)
        # Fill SegmentationTask if the scan is marked for autosegmentation
if experiment.AutoProcessing() & key & {'autosegment': True}:
SegmentationTask().fill(key)
@property
def microns_per_pixel(self):
""" Returns an array with microns per pixel in height and width. """
um_height, px_height, um_width, px_width = self.fetch1('um_height', 'px_height',
'um_width', 'px_width')
return np.array([um_height / px_height, um_width / px_width])
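    # Hedged usage sketch (added; not part of the original schema): once a scan
    # has been populated, its pixel pitch can be read off the restricted
    # relation, e.g. with a hypothetical key
    #
    #   key = dict(animal_id=1, session=1, scan_idx=1,
    #              pipe_version=CURRENT_VERSION)
    #   um_per_px = (ScanInfo() & key).microns_per_pixel  # [um/px height, width]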
@schema
class Quality(dj.Computed):
definition = """ # different quality metrics for a scan (before corrections)
-> ScanInfo
"""
@property
def key_source(self):
return ScanInfo() & {'pipe_version': CURRENT_VERSION}
class MeanIntensity(dj.Part):
definition = """ # mean intensity values across time
-> Quality
-> shared.Field
-> shared.Channel
---
intensities : longblob
"""
class SummaryFrames(dj.Part):
definition = """ # 16-part summary of the scan (mean of 16 blocks)
-> Quality
-> shared.Field
-> shared.Channel
---
summary : longblob # h x w x 16
"""
class Contrast(dj.Part):
definition = """ # difference between 99 and 1 percentile across time
-> Quality
-> shared.Field
-> shared.Channel
---
contrasts : longblob
"""
class QuantalSize(dj.Part):
definition = """ # quantal size in images
-> Quality
-> shared.Field
-> shared.Channel
---
min_intensity : int # min value in movie
max_intensity : int # max value in movie
quantal_size : float # variance slope, corresponds to quantal size
zero_level : int # level corresponding to zero (computed from variance dependence)
quantal_frame : longblob # average frame expressed in quanta
"""
class EpileptiformEvents(dj.Part):
definition = """ # compute frequency of epileptiform events
-> Quality
-> shared.Field
-> shared.Channel
---
frequency : float # (events / sec) frequency of epileptiform events
abn_indices : longblob # indices of epileptiform events (0-based)
peak_indices : longblob # indices of all local maxima peaks (0-based)
prominences : longblob # peak prominence for all peaks
widths : longblob # (secs) width at half prominence for all peaks
"""
def make(self, key):
# Read the scan
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Insert in Quality
self.insert1(key)
for field_id in range(scan.num_fields):
print('Computing quality metrics for field', field_id + 1)
for channel in range(scan.num_channels):
# Map: Compute quality metrics in parallel
results = performance.map_frames(performance.parallel_quality_metrics,
scan, field_id=field_id, channel=channel)
# Reduce
mean_intensities = np.zeros(scan.num_frames)
contrasts = np.zeros(scan.num_frames)
for frames, chunk_mis, chunk_contrasts, _ in results:
mean_intensities[frames] = chunk_mis
contrasts[frames] = chunk_contrasts
sorted_results = sorted(results, key=lambda res: res[0])
mean_groups = np.array_split([r[3] for r in sorted_results], 16) # 16 groups
frames = np.stack([np.mean(g, axis=0) for g in mean_groups if g.any()], axis=-1)
# Compute quantal size
middle_frame = int(np.floor(scan.num_frames / 2))
mini_scan = scan[field_id, :, :, channel, max(middle_frame - 2000, 0): middle_frame + 2000]
mini_scan = mini_scan.astype(np.float32)
results = quality.compute_quantal_size(mini_scan)
min_intensity, max_intensity, _, _, quantal_size, zero_level = results
quantal_frame = (np.mean(mini_scan, axis=-1) - zero_level) / quantal_size
# Compute abnormal event frequency
deviations = (mean_intensities - mean_intensities.mean()) / mean_intensities.mean()
peaks, prominences, widths = quality.find_peaks(deviations)
widths = [w / scan.fps for w in widths] # in seconds
abnormal = peaks[[p > 0.2 and w < 0.4 for p, w in zip(prominences, widths)]]
abnormal_freq = len(abnormal) / (scan.num_frames / scan.fps)
# Insert
field_key = {**key, 'field': field_id + 1, 'channel': channel + 1}
self.MeanIntensity().insert1({**field_key, 'intensities': mean_intensities})
self.Contrast().insert1({**field_key, 'contrasts': contrasts})
self.SummaryFrames().insert1({**field_key, 'summary': frames})
self.QuantalSize().insert1({**field_key, 'min_intensity': min_intensity,
'max_intensity': max_intensity,
'quantal_size': quantal_size,
'zero_level': zero_level,
'quantal_frame': quantal_frame})
self.EpileptiformEvents.insert1({**field_key, 'frequency': abnormal_freq,
'abn_indices': abnormal,
'peak_indices': peaks,
'prominences': prominences,
'widths': widths})
self.notify(field_key, frames, mean_intensities, contrasts)
@notify.ignore_exceptions
def notify(self, key, summary_frames, mean_intensities, contrasts):
# Send summary frames
import imageio
video_filename = '/tmp/' + key_hash(key) + '.gif'
percentile_99th = np.percentile(summary_frames, 99.5)
summary_frames = np.clip(summary_frames, None, percentile_99th)
summary_frames = signal.float2uint8(summary_frames).transpose([2, 0, 1])
imageio.mimsave(video_filename, summary_frames, duration=0.4)
msg = ('summary frames for {animal_id}-{session}-{scan_idx} field {field} '
'channel {channel}').format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=video_filename, file_title=msg)
# Send intensity and contrasts
fig, axes = plt.subplots(2, 1, figsize=(15, 8), sharex=True)
axes[0].set_title('Mean intensity', size='small')
axes[0].plot(mean_intensities)
axes[0].set_ylabel('Pixel intensities')
axes[1].set_title('Contrast (99 - 1 percentile)', size='small')
axes[1].plot(contrasts)
axes[1].set_xlabel('Frames')
axes[1].set_ylabel('Pixel intensities')
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = ('quality traces for {animal_id}-{session}-{scan_idx} field {field} '
'channel {channel}').format(**key)
slack_user.notify(file=img_filename, file_title=msg)
@schema
class CorrectionChannel(dj.Manual):
definition = """ # channel to use for raster and motion correction
-> experiment.Scan
-> shared.Field
---
-> shared.Channel
"""
def fill(self, key, channel=1):
for field_key in (ScanInfo.Field() & key).fetch(dj.key):
self.insert1({**field_key, 'channel': channel}, ignore_extra_fields=True,
skip_duplicates=True)
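    # Hedged usage sketch (added; not part of the original schema): for scans
    # with more than one channel the correction channel is set manually, e.g.
    #
    #   CorrectionChannel().fill({'animal_id': 1, 'session': 1, 'scan_idx': 1},
    #                            channel=2)   # hypothetical key and channel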
@schema
class RasterCorrection(dj.Computed):
definition = """ # raster correction for bidirectional resonant scans
-> ScanInfo # animal_id, session, scan_idx, version
-> CorrectionChannel # animal_id, session, scan_idx, field
---
raster_template : longblob # average frame from the middle of the movie
raster_phase : float # difference between expected and recorded scan angle
"""
@property
def key_source(self):
return ScanInfo * CorrectionChannel & {'pipe_version': CURRENT_VERSION}
def make(self, key):
from scipy.signal import tukey
# Read the scan
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename, dtype=np.float32)
# Select correction channel
channel = (CorrectionChannel() & key).fetch1('channel') - 1
field_id = key['field'] - 1
# Load some frames from the middle of the scan
middle_frame = int(np.floor(scan.num_frames / 2))
frames = slice(max(middle_frame - 1000, 0), middle_frame + 1000)
mini_scan = scan[field_id, :, :, channel, frames]
# Create results tuple
tuple_ = key.copy()
# Create template (average frame tapered to avoid edge artifacts)
taper = np.sqrt(np.outer(tukey(scan.image_height, 0.4),
tukey(scan.image_width, 0.4)))
anscombed = 2 * np.sqrt(mini_scan - mini_scan.min() + 3 / 8) # anscombe transform
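        # (Comment added for clarity: after shifting by the minimum, the
        #  Anscombe transform 2 * sqrt(x + 3/8) approximately stabilizes
        #  Poisson-like shot noise to unit variance, so bright and dim pixels
        #  contribute comparably to the template.)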
template = np.mean(anscombed, axis=-1) * taper
tuple_['raster_template'] = template
# Compute raster correction parameters
if scan.is_bidirectional:
tuple_['raster_phase'] = galvo_corrections.compute_raster_phase(template,
scan.temporal_fill_fraction)
else:
tuple_['raster_phase'] = 0
# Insert
self.insert1(tuple_)
def get_correct_raster(self):
""" Returns a function to perform raster correction on the scan. """
raster_phase = self.fetch1('raster_phase')
fill_fraction = (ScanInfo() & self).fetch1('fill_fraction')
if abs(raster_phase) < 1e-7:
correct_raster = lambda scan: scan.astype(np.float32, copy=False)
else:
correct_raster = lambda scan: galvo_corrections.correct_raster(scan,
raster_phase, fill_fraction)
return correct_raster
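    # Hedged usage sketch (added; not part of the original schema): downstream
    # code typically applies the returned callable to raw frames, e.g.
    #
    #   correct_raster = (RasterCorrection() & key).get_correct_raster()
    #   corrected = correct_raster(scan[field_id, :, :, channel, :])
    #
    # where ``key``, ``scan``, ``field_id`` and ``channel`` are assumed to come
    # from the surrounding pipeline code.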
@schema
class MotionCorrection(dj.Computed):
definition = """ # motion correction for galvo scans
-> RasterCorrection
---
motion_template : longblob # image used as alignment template
y_shifts : longblob # (pixels) y motion correction shifts
x_shifts : longblob # (pixels) x motion correction shifts
y_std : float # (pixels) standard deviation of y shifts
x_std : float # (pixels) standard deviation of x shifts
outlier_frames : longblob # mask with true for frames with outlier shifts (already corrected)
align_time=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def key_source(self):
return RasterCorrection() & {'pipe_version': CURRENT_VERSION}
def make(self, key):
"""Computes the motion shifts per frame needed to correct the scan."""
from scipy import ndimage
# Read the scan
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Get some params
px_height, px_width = (ScanInfo() & key).fetch1('px_height', 'px_width')
channel = (CorrectionChannel() & key).fetch1('channel') - 1
field_id = key['field'] - 1
# Load some frames from middle of scan to compute template
skip_rows = int(round(px_height * 0.10)) # we discard some rows/cols to avoid edge artifacts
skip_cols = int(round(px_width * 0.10))
middle_frame = int(np.floor(scan.num_frames / 2))
mini_scan = scan[field_id, skip_rows: -skip_rows, skip_cols: -skip_cols, channel,
max(middle_frame - 1000, 0): middle_frame + 1000]
mini_scan = mini_scan.astype(np.float32, copy=False)
# Correct mini scan
correct_raster = (RasterCorrection() & key).get_correct_raster()
mini_scan = correct_raster(mini_scan)
# Create template
mini_scan = 2 * np.sqrt(mini_scan - mini_scan.min() + 3 / 8) # *
template = np.mean(mini_scan, axis=-1)
template = ndimage.gaussian_filter(template, 0.7) # **
# * Anscombe transform to normalize noise, increase contrast and decrease outliers' leverage
# ** Small amount of gaussian smoothing to get rid of high frequency noise
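# (The Anscombe transform 2 * sqrt(x + 3/8) approximately stabilizes the variance of
# Poisson-distributed photon counts, so bright pixels do not dominate the template.)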
# Map: compute motion shifts in parallel
f = performance.parallel_motion_shifts # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (ScanInfo() & key).fetch1('fill_fraction')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction,
'template': template}
results = performance.map_frames(f, scan, field_id=field_id,
y=slice(skip_rows, -skip_rows),
x=slice(skip_cols, -skip_cols), channel=channel,
kwargs=kwargs)
# Reduce
y_shifts = np.zeros(scan.num_frames)
x_shifts = np.zeros(scan.num_frames)
for frames, chunk_y_shifts, chunk_x_shifts in results:
y_shifts[frames] = chunk_y_shifts
x_shifts[frames] = chunk_x_shifts
# Detect outliers
max_y_shift, max_x_shift = 20 / (ScanInfo() & key).microns_per_pixel
y_shifts, x_shifts, outliers = galvo_corrections.fix_outliers(y_shifts, x_shifts,
max_y_shift,
max_x_shift)
# Center shifts around zero
y_shifts -= np.median(y_shifts)
x_shifts -= np.median(x_shifts)
# Create results tuple
tuple_ = key.copy()
tuple_['field'] = field_id + 1
tuple_['motion_template'] = template
tuple_['y_shifts'] = y_shifts
tuple_['x_shifts'] = x_shifts
tuple_['outlier_frames'] = outliers
tuple_['y_std'] = np.std(y_shifts)
tuple_['x_std'] = np.std(x_shifts)
# Insert
self.insert1(tuple_)
# Notify after all fields have been processed
scan_key = {'animal_id': key['animal_id'], 'session': key['session'],
'scan_idx': key['scan_idx'], 'pipe_version': key['pipe_version']}
if len(CorrectionChannel - MotionCorrection & scan_key) == 0:
self.notify(scan_key, scan.num_frames, scan.num_fields)
@notify.ignore_exceptions
def notify(self, key, num_frames, num_fields):
fps = (ScanInfo() & key).fetch1('fps')
seconds = np.arange(num_frames) / fps
fig, axes = plt.subplots(num_fields, 1, figsize=(15, 4 * num_fields), sharey=True)
axes = [axes] if num_fields == 1 else axes # make list if single axis object
for i in range(num_fields):
y_shifts, x_shifts = (self & key & {'field': i + 1}).fetch1('y_shifts',
'x_shifts')
axes[i].set_title('Shifts for field {}'.format(i + 1))
axes[i].plot(seconds, y_shifts, label='y shifts')
axes[i].plot(seconds, x_shifts, label='x shifts')
axes[i].set_ylabel('Pixels')
axes[i].set_xlabel('Seconds')
axes[i].legend()
fig.tight_layout()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'motion shifts for {animal_id}-{session}-{scan_idx}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
def save_video(self, filename='galvo_corrections.mp4', channel=1, start_index=0,
seconds=30, dpi=250):
""" Creates an animation video showing the original vs corrected scan.
:param string filename: Output filename (path + filename)
:param int channel: What channel from the scan to use. Starts at 1
:param int start_index: Where in the scan to start the video.
:param int seconds: How long in seconds should the animation run.
:param int dpi: Dots per inch, controls the quality of the video.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get fps and total_num_frames
fps = (ScanInfo() & self).fetch1('fps')
num_video_frames = int(round(fps * seconds))
stop_index = start_index + num_video_frames
# Load the scan
scan_filename = (experiment.Scan() & self).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename, dtype=np.float32)
scan_ = scan[self.fetch1('field') - 1, :, :, channel - 1, start_index: stop_index]
original_scan = scan_.copy()
# Correct the scan
correct_raster = (RasterCorrection() & self).get_correct_raster()
correct_motion = self.get_correct_motion()
corrected_scan = correct_motion(correct_raster(scan_), slice(start_index, stop_index))
# Create animation
import matplotlib.animation as animation
## Set the figure
fig, axes = plt.subplots(1, 2, sharex=True, sharey=True)
axes[0].set_title('Original')
im1 = axes[0].imshow(original_scan[:, :, 0], vmin=original_scan.min(),
vmax=original_scan.max()) # just a placeholder
fig.colorbar(im1, ax=axes[0])
axes[0].axis('off')
axes[1].set_title('Corrected')
im2 = axes[1].imshow(corrected_scan[:, :, 0], vmin=corrected_scan.min(),
vmax=corrected_scan.max()) # just a placeholder
fig.colorbar(im2, ax=axes[1])
axes[1].axis('off')
## Make the animation
def update_img(i):
im1.set_data(original_scan[:, :, i])
im2.set_data(corrected_scan[:, :, i])
video = animation.FuncAnimation(fig, update_img, corrected_scan.shape[2],
interval=1000 / fps)
# Save animation
if not filename.endswith('.mp4'):
filename += '.mp4'
print('Saving video at:', filename)
print('If this takes too long, stop it and call again with dpi <', dpi, '(default)')
video.save(filename, dpi=dpi)
return fig
def get_correct_motion(self):
""" Returns a function to perform motion correction on scans. """
x_shifts, y_shifts = self.fetch1('x_shifts', 'y_shifts')
return lambda scan, indices=slice(None): galvo_corrections.correct_motion(scan,
x_shifts[indices], y_shifts[indices])
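# Note: the returned closure also accepts the frame indices covered by the chunk, so the
# stored shifts can be applied to partial reads of the scan (see save_video above for an example).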
@schema
class SummaryImages(dj.Computed):
definition = """ # summary images for each field and channel after corrections
-> MotionCorrection
-> shared.Channel
"""
@property
def key_source(self):
return MotionCorrection() & {'pipe_version': CURRENT_VERSION}
class Average(dj.Part):
definition = """ # mean of each pixel across time
-> master
---
average_image : longblob
"""
class Correlation(dj.Part):
definition = """ # average temporal correlation between each pixel and its eight neighbors
-> master
---
correlation_image : longblob
"""
class L6Norm(dj.Part):
definition = """ # l6-norm of each pixel across time
-> master
---
l6norm_image : longblob
"""
def make(self, key):
# Read the scan
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
for channel in range(scan.num_channels):
# Map: Compute some statistics in different chunks of the scan
f = performance.parallel_summary_images # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (ScanInfo() & key).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & key).fetch1('y_shifts', 'x_shifts')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction,
'y_shifts': y_shifts, 'x_shifts': x_shifts}
results = performance.map_frames(f, scan, field_id=key['field'] - 1,
channel=channel, kwargs=kwargs)
# Reduce: Compute average images
average_image = np.sum([r[0] for r in results], axis=0) / scan.num_frames
l6norm_image = np.sum([r[1] for r in results], axis=0) ** (1 / 6)
# Reduce: Compute correlation image
sum_x = np.sum([r[2] for r in results], axis=0) # h x w
sum_sqx = np.sum([r[3] for r in results], axis=0) # h x w
sum_xy = np.sum([r[4] for r in results], axis=0) # h x w x 8
denom_factor = np.sqrt(scan.num_frames * sum_sqx - sum_x ** 2)
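# Pearson correlation with each neighbor: r = (N * sum_xy - sum_x_i * sum_x_j) / (denom_i * denom_j).
# The rot90 loop below reuses the same two expressions (vertical neighbor k and diagonal
# neighbor 4 + k) in all four orientations to cover the eight neighbors.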
corrs = np.zeros(sum_xy.shape)
for k in [0, 1, 2, 3]:
rotated_corrs = np.rot90(corrs, k=k)
rotated_sum_x = np.rot90(sum_x, k=k)
rotated_dfactor = np.rot90(denom_factor, k=k)
rotated_sum_xy = np.rot90(sum_xy, k=k)
# Compute correlation
rotated_corrs[1:, :, k] = (scan.num_frames * rotated_sum_xy[1:, :, k] -
rotated_sum_x[1:] * rotated_sum_x[:-1]) / \
(rotated_dfactor[1:] * rotated_dfactor[:-1])
rotated_corrs[1:, 1:, 4 + k] = ((scan.num_frames * rotated_sum_xy[1:, 1:, 4 + k] -
rotated_sum_x[1:, 1:] * rotated_sum_x[:-1, : -1]) /
(rotated_dfactor[1:, 1:] * rotated_dfactor[:-1, :-1]))
# Return back to original orientation
corrs = np.rot90(rotated_corrs, k=4 - k)
correlation_image = np.sum(corrs, axis=-1)
norm_factor = 5 * np.ones(correlation_image.shape) # edges
norm_factor[[0, -1, 0, -1], [0, -1, -1, 0]] = 3 # corners
norm_factor[1:-1, 1:-1] = 8 # center
correlation_image /= norm_factor
# Insert
field_key = {**key, 'channel': channel + 1}
self.insert1(field_key)
SummaryImages.Average().insert1({**field_key, 'average_image': average_image})
SummaryImages.L6Norm().insert1({**field_key, 'l6norm_image': l6norm_image})
SummaryImages.Correlation().insert1({**field_key,
'correlation_image': correlation_image})
self.notify(key, scan.num_channels)
@notify.ignore_exceptions
def notify(self, key, num_channels):
fig, axes = plt.subplots(num_channels, 2, squeeze=False, figsize=(12, 5 * num_channels))
axes[0, 0].set_title('L6-Norm', size='small')
axes[0, 1].set_title('Correlation', size='small')
for ax in axes.ravel():
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
for channel in range(num_channels):
axes[channel, 0].set_ylabel('Channel {}'.format(channel + 1), size='large',
rotation='horizontal', ha='right')
corr = (SummaryImages.Correlation() & key & {'channel': channel + 1}).fetch1('correlation_image')
l6norm = (SummaryImages.L6Norm() & key & {'channel': channel + 1}).fetch1('l6norm_image')
axes[channel, 0].imshow(l6norm)
axes[channel, 1].imshow(corr)
fig.tight_layout()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'summary images for {animal_id}-{session}-{scan_idx} field {field}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg, channel='#pipeline_quality')
@schema
class SegmentationTask(dj.Manual):
definition = """ # defines the target of segmentation and the channel to use
-> experiment.Scan
-> shared.Field
-> shared.Channel
-> shared.SegmentationMethod
---
-> experiment.Compartment
"""
def fill(self, key, channel=1, segmentation_method=6, compartment='soma'):
for field_key in (ScanInfo.Field() & key).fetch(dj.key):
tuple_ = {**field_key, 'channel': channel, 'compartment': compartment,
'segmentation_method': segmentation_method}
self.insert1(tuple_, ignore_extra_fields=True, skip_duplicates=True)
def estimate_num_components(self):
""" Estimates the number of components per field using simple rules of thumb.
For somatic scans, estimate number of neurons based on:
(100x100x100)um^3 = 1e6 um^3 -> 100 neurons; (1x1x1)mm^3 = 1e9 um^3 -> 100K neurons
For axonal/dendritic scans, use roughly five to ten times the somatic estimate.
:returns: Number of components
:rtype: int
"""
# Get field dimensions (in microns)
scan = (ScanInfo() & self & {'pipe_version': CURRENT_VERSION})
field_height, field_width = scan.fetch1('um_height', 'um_width')
field_thickness = 10 # assumption
field_volume = field_width * field_height * field_thickness
# Estimate number of components
compartment = self.fetch1('compartment')
if compartment == 'soma':
num_components = field_volume * 0.0001
elif compartment == 'axon':
num_components = field_volume * 0.0005 # five times as many neurons
elif compartment == 'bouton':
num_components = field_volume * 0.001 # 10 times as many neurons
else:
raise PipelineException("Compartment type '{}' not recognized".format(compartment))
return int(round(num_components))
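# Worked example with hypothetical numbers: a 400 x 400 um somatic field with the assumed
# 10 um thickness gives 400 * 400 * 10 = 1.6e6 um^3, i.e. 1.6e6 * 0.0001 = 160 components.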
@schema
class DoNotSegment(dj.Manual):
definition = """ # field/channels that should not be segmented (used for web interface only)
-> experiment.Scan
-> shared.Field
-> shared.Channel
"""
@schema
class Segmentation(dj.Computed):
definition = """ # Different mask segmentations.
-> MotionCorrection # animal_id, session, scan_idx, version, field
-> SegmentationTask # animal_id, session, scan_idx, field, channel, segmentation_method
---
segmentation_time=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def key_source(self):
return MotionCorrection() * SegmentationTask() & {'pipe_version': CURRENT_VERSION}
class Mask(dj.Part):
definition = """ # mask produced by segmentation.
-> Segmentation
mask_id : smallint
---
pixels : longblob # indices into the image in column major (Fortran) order
weights : longblob # weights of the mask at the indices above
"""
def get_mask_as_image(self):
""" Return this mask as an image (2-d numpy array)."""
# Get params
pixels, weights = self.fetch('pixels', 'weights')
image_height, image_width = (ScanInfo() & self).fetch1('px_height', 'px_width')
# Reshape mask
mask = Segmentation.reshape_masks(pixels, weights, image_height, image_width)
return np.squeeze(mask)
class Manual(dj.Part):
definition = """ # masks created manually
-> Segmentation
"""
def make(self, key):
print('Warning: Manual segmentation is not implemented in Python.')
# Copy any masks (and MaskClassification) that were there before
# Delete key from Segmentation (this is needed for trace and ScanSet and Activity computation to restart when things are added)
# Show GUI with the current masks
# User modifies it somehow to produce the new set of masks
# Insert info in Segmentation -> Segmentation.Manual -> Segmentation.Mask -> MaskClassification -> MaskClassification.Type
class CNMF(dj.Part):
definition = """ # source extraction using constrained non-negative matrix factorization
-> Segmentation
---
params : varchar(1024) # parameters sent to CNMF as a JSON array
"""
def make(self, key):
""" Use CNMF to extract masks and traces.
See caiman_interface.extract_masks for explanation of parameters
"""
from .utils import caiman_interface as cmn
import json
import uuid
import os
print('')
print('*' * 85)
print('Processing {}'.format(key))
# Get some parameters
field_id = key['field'] - 1
channel = key['channel'] - 1
image_height, image_width = (ScanInfo() & key).fetch1('px_height', 'px_width')
num_frames = (ScanInfo() & key).fetch1('nframes')
# Read scan
print('Reading scan...')
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Create memory mapped file (as expected by CaImAn)
print('Creating memory mapped file...')
filename = '/tmp/caiman-{}_d1_{}_d2_{}_d3_1_order_C_frames_{}_.mmap'.format(
uuid.uuid4(), image_height, image_width, num_frames)
mmap_shape = (image_height * image_width, num_frames)
mmap_scan = np.memmap(filename, mode='w+', shape=mmap_shape, dtype=np.float32)
# Map: Correct scan and save in memmap scan
f = performance.parallel_save_memmap # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (ScanInfo() & key).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & key).fetch1('y_shifts', 'x_shifts')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction, 'y_shifts': y_shifts,
'x_shifts': x_shifts, 'mmap_scan': mmap_scan}
results = performance.map_frames(f, scan, field_id=field_id, channel=channel, kwargs=kwargs)
# Reduce: Use the minimum values to make memory mapped scan nonnegative
mmap_scan -= np.min(results) # bit inefficient but necessary
# Set CNMF parameters
## Set general parameters
kwargs = {}
kwargs['num_background_components'] = 1
kwargs['merge_threshold'] = 0.7
kwargs['fps'] = (ScanInfo() & key).fetch1('fps')
# Set params specific to method and segmentation target
target = (SegmentationTask() & key).fetch1('compartment')
if key['segmentation_method'] == 2: # nmf
if target == 'axon':
kwargs['init_on_patches'] = True
kwargs['proportion_patch_overlap'] = 0.2 # 20% overlap
kwargs['num_components_per_patch'] = 15
kwargs['init_method'] = 'sparse_nmf'
kwargs['snmf_alpha'] = 500 # 10^2 to 10^3.5 is a good range
kwargs['patch_size'] = tuple(50 / (ScanInfo() & key).microns_per_pixel) # 50 x 50 microns
elif target == 'bouton':
kwargs['init_on_patches'] = False
kwargs['num_components'] = (SegmentationTask() & key).estimate_num_components()
kwargs['init_method'] = 'greedy_roi'
kwargs['soma_diameter'] = tuple(2 / (ScanInfo() & key).microns_per_pixel)
else: # soma
kwargs['init_on_patches'] = False
kwargs['num_components'] = (SegmentationTask() & key).estimate_num_components()
kwargs['init_method'] = 'greedy_roi'
kwargs['soma_diameter'] = tuple(14 / (ScanInfo() & key).microns_per_pixel)
else: #nmf-new
kwargs['init_on_patches'] = True
kwargs['proportion_patch_overlap'] = 0.2 # 20% overlap
if target == 'axon':
kwargs['num_components_per_patch'] = 15
kwargs['init_method'] = 'sparse_nmf'
kwargs['snmf_alpha'] = 500 # 10^2 to 10^3.5 is a good range
kwargs['patch_size'] = tuple(50 / (ScanInfo() & key).microns_per_pixel) # 50 x 50 microns
elif target == 'bouton':
kwargs['num_components_per_patch'] = 5
kwargs['init_method'] = 'greedy_roi'
kwargs['patch_size'] = tuple(20 / (ScanInfo() & key).microns_per_pixel) # 20 x 20 microns
kwargs['soma_diameter'] = tuple(2 / (ScanInfo() & key).microns_per_pixel)
else: # soma
kwargs['num_components_per_patch'] = 6
kwargs['init_method'] = 'greedy_roi'
kwargs['patch_size'] = tuple(50 / (ScanInfo() & key).microns_per_pixel)
kwargs['soma_diameter'] = tuple(8 / (ScanInfo() & key).microns_per_pixel)
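# Note: patch_size and soma_diameter above are specified in microns and converted to
# pixels by dividing by the field's microns_per_pixel.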
## Set performance/execution parameters (heuristically), decrease if memory overflows
kwargs['num_processes'] = 8 # Set to None for all cores available
kwargs['num_pixels_per_process'] = 10000
# Extract traces
print('Extracting masks and traces (cnmf)...')
scan_ = mmap_scan.reshape((image_height, image_width, num_frames), order='F')
cnmf_result = cmn.extract_masks(scan_, mmap_scan, **kwargs)
(masks, traces, background_masks, background_traces, raw_traces) = cnmf_result
# Delete memory mapped scan
print('Deleting memory mapped scan...')
os.remove(mmap_scan.filename)
# Insert CNMF results
print('Inserting masks, background components and traces...')
dj.conn()
## Insert in CNMF, Segmentation and Fluorescence
self.insert1({**key, 'params': json.dumps(kwargs)})
Fluorescence().insert1(key, allow_direct_insert=True) # we also insert traces
## Insert background components
Segmentation.CNMFBackground().insert1({**key, 'masks': background_masks,
'activity': background_traces})
## Insert masks and traces (masks in Matlab format)
num_masks = masks.shape[-1]
masks = masks.reshape(-1, num_masks, order='F').T # [num_masks x num_pixels] in F order
raw_traces = raw_traces.astype(np.float32, copy=False)
for mask_id, mask, trace in zip(range(1, num_masks + 1), masks, raw_traces):
mask_pixels = np.where(mask)[0]
mask_weights = mask[mask_pixels]
mask_pixels += 1 # matlab indices start at 1
Segmentation.Mask().insert1({**key, 'mask_id': mask_id, 'pixels': mask_pixels,
'weights': mask_weights})
Fluorescence.Trace().insert1({**key, 'mask_id': mask_id, 'trace': trace})
Segmentation().notify(key)
def save_video(self, filename='cnmf_results.mp4', start_index=0, seconds=30,
dpi=250, first_n=None):
""" Creates an animation video showing the results of CNMF.
:param string filename: Output filename (path + filename)
:param int start_index: Where in the scan to start the video.
:param int seconds: How long in seconds should the animation run.
:param int dpi: Dots per inch, controls the quality of the video.
:param int first_n: Draw only the first n components.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get fps and calculate total number of frames
fps = (ScanInfo() & self).fetch1('fps')
num_video_frames = int(round(fps * seconds))
stop_index = start_index + num_video_frames
# Load the scan
channel = self.fetch1('channel') - 1
field_id = self.fetch1('field') - 1
scan_filename = (experiment.Scan() & self).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename, dtype=np.float32)
scan_ = scan[field_id, :, :, channel, start_index: stop_index]
# Correct the scan
correct_raster = (RasterCorrection() & self).get_correct_raster()
correct_motion = (MotionCorrection() & self).get_correct_motion()
scan_ = correct_motion(correct_raster(scan_), slice(start_index, stop_index))
# Get scan dimensions
image_height, image_width, _ = scan_.shape
num_pixels = image_height * image_width
# Get masks and traces
masks = (Segmentation() & self).get_all_masks()
traces = (Fluorescence() & self).get_all_traces() # always there for CNMF
background_masks, background_traces = (Segmentation.CNMFBackground() &
self).fetch1('masks', 'activity')
# Select first n components
if first_n is not None:
masks = masks[:, :, :first_n]
traces = traces[:first_n, :]
# Drop frames that won't be displayed
traces = traces[:, start_index: stop_index]
background_traces = background_traces[:, start_index: stop_index]
# Create movies
extracted = np.dot(masks.reshape(num_pixels, -1), traces)
extracted = extracted.reshape(image_height, image_width, -1)
background = np.dot(background_masks.reshape(num_pixels, -1), background_traces)
background = background.reshape(image_height, image_width, -1)
residual = scan_ - extracted - background
# Create animation
import matplotlib.animation as animation
## Set the figure
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
axes[0, 0].set_title('Original (Y)')
im1 = axes[0, 0].imshow(scan_[:, :, 0], vmin=scan_.min(), vmax=scan_.max()) # just a placeholder
fig.colorbar(im1, ax=axes[0, 0])
axes[0, 1].set_title('Extracted (A*C)')
im2 = axes[0, 1].imshow(extracted[:, :, 0], vmin=extracted.min(), vmax=extracted.max())
fig.colorbar(im2, ax=axes[0, 1])
axes[1, 0].set_title('Background (B*F)')
im3 = axes[1, 0].imshow(background[:, :, 0], vmin=background.min(),
vmax=background.max())
fig.colorbar(im3, ax=axes[1, 0])
axes[1, 1].set_title('Residual (Y - A*C - B*F)')
im4 = axes[1, 1].imshow(residual[:, :, 0], vmin=residual.min(), vmax=residual.max())
fig.colorbar(im4, ax=axes[1, 1])
for ax in axes.ravel():
ax.axis('off')
## Make the animation
def update_img(i):
im1.set_data(scan_[:, :, i])
im2.set_data(extracted[:, :, i])
im3.set_data(background[:, :, i])
im4.set_data(residual[:, :, i])
video = animation.FuncAnimation(fig, update_img, scan_.shape[2],
interval=1000 / fps)
# Save animation
if not filename.endswith('.mp4'):
filename += '.mp4'
print('Saving video at:', filename)
print('If this takes too long, stop it and call again with dpi <', dpi, '(default)')
video.save(filename, dpi=dpi)
return fig
class CNMFBackground(dj.Part):
definition = """ # inferred background components
-> Segmentation.CNMF
---
masks : longblob # array (im_height x im_width x num_background_components)
activity : longblob # array (num_background_components x timesteps)
"""
def make(self, key):
# Create masks
if key['segmentation_method'] == 1: # manual
Segmentation.Manual().make(key)
elif key['segmentation_method'] in [2, 6]: # nmf and nmf-patches
self.insert1(key)
Segmentation.CNMF().make(key)
elif key['segmentation_method'] in [3, 4]: # nmf_patches
msg = 'This method has been deprecated, use segmentation_method 6'
raise PipelineException(msg)
else:
msg = 'Unrecognized segmentation method {}'.format(key['segmentation_method'])
raise PipelineException(msg)
@notify.ignore_exceptions
def notify(self, key):
fig = (Segmentation() & key).plot_masks()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'segmentation for {animal_id}-{session}-{scan_idx} field {field}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
@staticmethod
def reshape_masks(mask_pixels, mask_weights, image_height, image_width):
""" Reshape masks into an image_height x image_width x num_masks array."""
masks = np.zeros([image_height, image_width, len(mask_pixels)], dtype=np.float32)
# Reshape each mask
for i, (mp, mw) in enumerate(zip(mask_pixels, mask_weights)):
mask_as_vector = np.zeros(image_height * image_width)
mask_as_vector[np.squeeze(mp - 1).astype(int)] = np.squeeze(mw)
masks[:, :, i] = mask_as_vector.reshape(image_height, image_width, order='F')
return masks
def get_all_masks(self):
"""Returns an image_height x image_width x num_masks matrix with all masks."""
mask_rel = (Segmentation.Mask() & self)
# Get masks
image_height, image_width = (ScanInfo() & self).fetch1('px_height', 'px_width')
mask_pixels, mask_weights = mask_rel.fetch('pixels', 'weights', order_by='mask_id')
# Reshape masks
masks = Segmentation.reshape_masks(mask_pixels, mask_weights, image_height, image_width)
return masks
def plot_masks(self, threshold=0.97, first_n=None):
""" Draw contours of masks over the correlation image (if available).
:param threshold: Threshold on the cumulative mass to define mask contours. Lower
for tighter contours.
:param first_n: Number of masks to plot. None for all.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get masks
masks = self.get_all_masks()
if first_n is not None:
masks = masks[:, :, :first_n]
# Get correlation image if defined, black background otherwise.
image_rel = SummaryImages.Correlation() & self
if image_rel:
background_image = image_rel.fetch1('correlation_image')
else:
background_image = np.zeros(masks.shape[:-1])
# Plot background
image_height, image_width, num_masks = masks.shape
figsize = np.array([image_width, image_height]) / min(image_height, image_width)
fig = plt.figure(figsize=figsize * 7)
plt.imshow(background_image)
# Draw contours
cumsum_mask = np.empty([image_height, image_width])
for i in range(num_masks):
mask = masks[:, :, i]
## Compute cumulative mass (similar to caiman)
indices = np.unravel_index(np.flip(np.argsort(mask, axis=None), axis=0), mask.shape) # max to min value in mask
cumsum_mask[indices] = np.cumsum(mask[indices]**2) / np.sum(mask**2)
## Plot contour at desired threshold (with random color)
random_color = (np.random.rand(), np.random.rand(), np.random.rand())
plt.contour(cumsum_mask, [threshold], linewidths=0.8, colors=[random_color])
return fig
@schema
class Fluorescence(dj.Computed):
definition = """ # fluorescence traces before spike extraction or filtering
-> Segmentation # animal_id, session, scan_idx, pipe_version, field, channel, segmentation_method
"""
@property
def key_source(self):
return Segmentation() & {'pipe_version': CURRENT_VERSION}
class Trace(dj.Part):
definition = """
-> Fluorescence
-> Segmentation.Mask
---
trace : longblob
"""
def make(self, key):
# Load scan
print('Reading scan...')
field_id = key['field'] - 1
channel = key['channel'] - 1
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Map: Extract traces
print('Creating fluorescence traces...')
f = performance.parallel_fluorescence # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (ScanInfo() & key).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & key).fetch1('y_shifts', 'x_shifts')
mask_ids, pixels, weights = (Segmentation.Mask() & key).fetch('mask_id', 'pixels', 'weights')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction,
'y_shifts': y_shifts, 'x_shifts': x_shifts, 'mask_pixels': pixels,
'mask_weights': weights}
results = performance.map_frames(f, scan, field_id=field_id, channel=channel, kwargs=kwargs)
# Reduce: Concatenate
traces = np.zeros((len(mask_ids), scan.num_frames), dtype=np.float32)
for frames, chunk_traces in results:
traces[:, frames] = chunk_traces
# Insert
self.insert1(key)
for mask_id, trace in zip(mask_ids, traces):
Fluorescence.Trace().insert1({**key, 'mask_id': mask_id, 'trace': trace})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
fig = plt.figure(figsize=(15, 4))
plt.plot((Fluorescence() & key).get_all_traces().T)
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'calcium traces for {animal_id}-{session}-{scan_idx} field {field}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
def get_all_traces(self):
""" Returns a num_traces x num_timesteps matrix with all traces."""
traces = (Fluorescence.Trace() & self).fetch('trace', order_by='mask_id')
return np.array([x.squeeze() for x in traces])
@schema
class MaskClassification(dj.Computed):
definition = """ # classification of segmented masks.
-> Segmentation # animal_id, session, scan_idx, pipe_version, field, channel, segmentation_method
-> shared.ClassificationMethod
---
classif_time=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def key_source(self):
return (Segmentation() * shared.ClassificationMethod() &
{'pipe_version': CURRENT_VERSION})
class Type(dj.Part):
definition = """
-> MaskClassification
-> Segmentation.Mask
---
-> shared.MaskType
"""
def make(self, key):
# Skip axonal scans
target = (SegmentationTask() & key).fetch1('compartment')
if key['classification_method'] == 2 and target != 'soma':
print('Warning: Skipping {}. Automatic classification works only with somatic '
'scans'.format(key))
return
# Get masks
image_height, image_width = (ScanInfo() & key).fetch1('px_height', 'px_width')
mask_ids, pixels, weights = (Segmentation.Mask() & key).fetch('mask_id', 'pixels', 'weights')
masks = Segmentation.reshape_masks(pixels, weights, image_height, image_width)
# Classify masks
if key['classification_method'] == 1: # manual
if not SummaryImages() & key:
msg = 'Need to populate SummaryImages before manual mask classification'
raise PipelineException(msg)
template = (SummaryImages.Correlation() & key).fetch1('correlation_image')
masks = masks.transpose([2, 0, 1]) # num_masks, image_height, image_width
mask_types = mask_classification.classify_manual(masks, template)
elif key['classification_method'] == 2: # cnn-caiman
from .utils import caiman_interface as cmn
soma_diameter = tuple(14 / (ScanInfo() & key).microns_per_pixel)
probs = cmn.classify_masks(masks, soma_diameter)
mask_types = ['soma' if prob > 0.75 else 'artifact' for prob in probs]
else:
msg = 'Unrecognized classification method {}'.format(key['classification_method'])
raise PipelineException(msg)
print('Generated types:', mask_types)
# Insert results
self.insert1(key)
for mask_id, mask_type in zip(mask_ids, mask_types):
MaskClassification.Type().insert1({**key, 'mask_id': mask_id, 'type': mask_type})
self.notify(key, mask_types)
@notify.ignore_exceptions
def notify(self, key, mask_types):
fig = (MaskClassification() & key).plot_masks()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = ('mask classification for {animal_id}-{session}-{scan_idx} field {field}: '
'{somas} somas and {arts} artifacts').format(**key,
somas=mask_types.count('soma'), arts=mask_types.count('artifact'))
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg, channel='#pipeline_quality')
def plot_masks(self, threshold=0.99):
""" Draw contours of masks over the correlation image (if available) with different
colors per type
:param threshold: Threshold on the cumulative mass to define mask contours. Lower
for tighter contours.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get masks
masks = (Segmentation() & self).get_all_masks()
mask_types = (MaskClassification.Type() & self).fetch('type')
colormap = {'soma': 'b', 'axon': 'k', 'dendrite': 'c', 'neuropil': 'y',
'artifact': 'r', 'unknown': 'w'}
# Get correlation image if defined, black background otherwise.
image_rel = SummaryImages.Correlation() & self
if image_rel:
background_image = image_rel.fetch1('correlation_image')
else:
background_image = np.zeros(masks.shape[:-1])
# Plot background
image_height, image_width, num_masks = masks.shape
figsize = np.array([image_width, image_height]) / min(image_height, image_width)
fig = plt.figure(figsize=figsize * 7)
plt.imshow(background_image)
# Draw contours
cumsum_mask = np.empty([image_height, image_width])
for i in range(num_masks):
mask = masks[:, :, i]
color = colormap[mask_types[i]]
## Compute cumulative mass (similar to caiman)
indices = np.unravel_index(np.flip(np.argsort(mask, axis=None), axis=0), mask.shape) # max to min value in mask
cumsum_mask[indices] = np.cumsum(mask[indices]**2) / np.sum(mask**2)
## Plot contour at desired threshold
plt.contour(cumsum_mask, [threshold], linewidths=0.8, colors=[color])
return fig
@schema
class ScanSet(dj.Computed):
definition = """ # set of all units in the same scan
-> Fluorescence # processing done per field
"""
@property
def key_source(self):
return Fluorescence() & {'pipe_version': CURRENT_VERSION}
class Unit(dj.Part):
definition = """ # single unit in the scan
-> ScanInfo
-> shared.SegmentationMethod
unit_id : int # unique per scan & segmentation method
---
-> ScanSet # for it to act as a part table of ScanSet
-> Fluorescence.Trace
"""
class UnitInfo(dj.Part):
definition = """ # unit type and coordinates in x, y, z
-> ScanSet.Unit
---
um_x : smallint # x-coordinate of centroid in motor coordinate system
um_y : smallint # y-coordinate of centroid in motor coordinate system
um_z : smallint # z-coordinate of mask relative to surface of the cortex
px_x : smallint # x-coordinate of centroid in the frame
px_y : smallint # y-coordinate of centroid in the frame
ms_delay = 0 : smallint # (ms) delay from start of frame to recording of this unit
"""
def _job_key(self, key):
# Force reservation key to be per scan so diff fields are not run in parallel
return {k: v for k, v in key.items() if k not in ['field', 'channel']}
def make(self, key):
from pipeline.utils import caiman_interface as cmn
# Get masks
image_height, image_width = (ScanInfo() & key).fetch1('px_height', 'px_width')
mask_ids, pixels, weights = (Segmentation.Mask() & key).fetch('mask_id', 'pixels', 'weights')
masks = Segmentation.reshape_masks(pixels, weights, image_height, image_width)
# Compute units' coordinates
px_center = [image_height / 2, image_width / 2]
um_center = (ScanInfo() & key).fetch1('y', 'x')
um_z = (ScanInfo.Field() & key).fetch1('z')
px_centroids = cmn.get_centroids(masks)
um_centroids = um_center + (px_centroids - px_center) * (ScanInfo() & key).microns_per_pixel
# Compute units' delays
delay_image = (ScanInfo.Field() & key).fetch1('delay_image')
delays = (np.sum(masks * np.expand_dims(delay_image, -1), axis=(0, 1)) /
np.sum(masks, axis=(0, 1)))
delays = np.round(delays * 1e3).astype(np.int16) # in milliseconds
# Get next unit_id for scan
unit_rel = (ScanSet.Unit().proj() & key)
unit_id = np.max(unit_rel.fetch('unit_id')) + 1 if unit_rel else 1
# Insert in ScanSet
self.insert1(key)
# Insert units
unit_ids = range(unit_id, unit_id + len(mask_ids) + 1)
for unit_id, mask_id, (um_y, um_x), (px_y, px_x), delay in zip(unit_ids, mask_ids,
um_centroids, px_centroids, delays):
ScanSet.Unit().insert1({**key, 'unit_id': unit_id, 'mask_id': mask_id})
unit_info = {**key, 'unit_id': unit_id, 'um_x': um_x, 'um_y': um_y,
'um_z': um_z, 'px_x': px_x, 'px_y': px_y, 'ms_delay': delay}
ScanSet.UnitInfo().insert1(unit_info, ignore_extra_fields=True)
def plot_centroids(self, first_n=None):
""" Draw masks centroids over the correlation image. Works on a single field/channel
:param first_n: Number of masks to plot. None for all
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get centroids
centroids = self.get_all_centroids(centroid_type='px')
if first_n is not None:
centroids = centroids[:, :first_n] # select first n components
# Get correlation image if defined, black background otherwise.
image_rel = SummaryImages.Correlation() & self
if image_rel:
background_image = image_rel.fetch1('correlation_image')
else:
image_height, image_width = (ScanInfo() & self).fetch1('px_height', 'px_width')
background_image = np.zeros([image_height, image_width])
# Plot centroids
image_height, image_width = background_image.shape
figsize = np.array([image_width, image_height]) / min(image_height, image_width)
fig = plt.figure(figsize=figsize * 7)
plt.imshow(background_image)
plt.plot(centroids[:, 0], centroids[:, 1], 'ow', markersize=3)
return fig
def plot_centroids3d(self):
""" Plots the centroids of all units in the motor coordinate system (in microns)
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from mpl_toolkits.mplot3d import Axes3D
# Get centroids
centroids = self.get_all_centroids()
# Plot
# TODO: Add different colors for different types, correlation image as 2-d planes
# masks from diff channels with diff colors.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(centroids[:, 0], centroids[:, 1], centroids[:, 2])
ax.invert_zaxis()
ax.set_xlabel('x (um)')
ax.set_ylabel('y (um)')
ax.set_zlabel('z (um)')
return fig
def get_all_centroids(self, centroid_type='um'):
""" Returns the centroids for all units in the scan. Could also be limited by field.
Centroid type is either 'um' or 'px':
'um': Array (num_units x 3) with x, y, z in motor coordinate system (microns).
'px': Array (num_units x 2) with x, y pixel coordinates.
"""
units_rel = ScanSet.UnitInfo() & (ScanSet.Unit() & self)
if centroid_type == 'um':
xs, ys, zs = units_rel.fetch('um_x', 'um_y', 'um_z', order_by='unit_id')
centroids = np.stack([xs, ys, zs], axis=1)
else:
xs, ys = units_rel.fetch('px_x', 'px_y', order_by='unit_id')
centroids = np.stack([xs, ys], axis=1)
return centroids
@schema
class Activity(dj.Computed):
definition = """ # activity inferred from fluorescence traces
-> ScanSet # processing done per field
-> shared.SpikeMethod
---
activity_time=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def key_source(self):
return ScanSet() * shared.SpikeMethod() & {'pipe_version': CURRENT_VERSION}
class Trace(dj.Part):
definition = """ # deconvolved calcium activity
-> ScanSet.Unit
-> shared.SpikeMethod
---
-> Activity # for it to act as part table of Activity
trace : longblob
"""
class ARCoefficients(dj.Part):
definition = """ # fitted parameters for the autoregressive process (nmf deconvolution)
-> Activity.Trace
---
g : blob # g1, g2, ... coefficients for the AR process
"""
def make(self, key):
print('Creating activity traces for', key)
# Get fluorescence
fps = (ScanInfo() & key).fetch1('fps')
unit_ids, traces = (ScanSet.Unit() * Fluorescence.Trace() & key).fetch('unit_id', 'trace')
full_traces = [signal.fill_nans(np.squeeze(trace).copy()) for trace in traces]
# Insert in Activity
self.insert1(key)
if key['spike_method'] == 2: # oopsi (fast non-negative deconvolution)
import pyfnnd # Install from https://github.com/cajal/PyFNND.git
for unit_id, trace in zip(unit_ids, full_traces):
spike_trace = pyfnnd.deconvolve(trace, dt=1 / fps)[0].astype(np.float32, copy=False)
Activity.Trace().insert1({**key, 'unit_id': unit_id, 'trace': spike_trace})
elif key['spike_method'] == 3: # stm
import c2s # Install from https://github.com/lucastheis/c2s
for unit_id, trace in zip(unit_ids, full_traces):
start = signal.notnan(trace)
end = signal.notnan(trace, len(trace) - 1, increment=-1)
trace_dict = {'calcium': np.atleast_2d(trace[start:end + 1]), 'fps': fps}
data = c2s.predict(c2s.preprocess([trace_dict], fps=fps), verbosity=0)
spike_trace = np.squeeze(data[0].pop('predictions')).astype(np.float32, copy=False)
Activity.Trace().insert1({**key, 'unit_id': unit_id, 'trace': spike_trace})
elif key['spike_method'] == 5: # nmf
from pipeline.utils import caiman_interface as cmn
import multiprocessing as mp
with mp.Pool(10) as pool:
results = pool.map(cmn.deconvolve, full_traces)
for unit_id, (spike_trace, ar_coeffs) in zip(unit_ids, results):
spike_trace = spike_trace.astype(np.float32, copy=False)
Activity.Trace().insert1({**key, 'unit_id': unit_id, 'trace': spike_trace})
Activity.ARCoefficients().insert1({**key, 'unit_id': unit_id, 'g': ar_coeffs},
ignore_extra_fields=True)
else:
msg = 'Unrecognized spike method {}'.format(key['spike_method'])
raise PipelineException(msg)
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
fig = plt.figure(figsize=(15, 4))
plt.plot((Activity() & key).get_all_spikes().T)
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'spike traces for {animal_id}-{session}-{scan_idx} field {field}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
def plot_impulse_responses(self, num_timepoints=100):
""" Plots the impulse response functions for all traces.
:param int num_timepoints: The number of points after impulse to use for plotting.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
ar_rel = Activity.ARCoefficients() & (Activity.Trace() & self)
if ar_rel: # if an AR model was used
# Get some params
fps = (ScanInfo() & self).fetch1('fps')
ar_coeffs = ar_rel.fetch('g')
# Define the figure
fig = plt.figure()
x_axis = np.arange(num_timepoints) / fps # make it seconds
# Over each trace
for g in ar_coeffs:
AR_order = len(g)
# Calculate impulse response function
irf = np.zeros(num_timepoints)
irf[0] = 1 # initial spike
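# AR(p) recursion: irf[i] = g[0]*irf[i-1] + g[1]*irf[i-2] + ... + g[p-1]*irf[i-p], seeded with irf[0] = 1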
for i in range(1, num_timepoints):
if i <= AR_order: # start of the array needs special care
irf[i] = np.sum(g[:i] * irf[i - 1:: -1])
else:
irf[i] = np.sum(g * irf[i - 1: i - AR_order - 1: -1])
# Plot
plt.plot(x_axis, irf)
plt.xlabel('Seconds')
return fig
def get_all_spikes(self):
""" Returns a num_traces x num_timesteps matrix with all spikes."""
spikes = (Activity.Trace() & self).fetch('trace', order_by='unit_id')
return np.array([x.squeeze() for x in spikes])
@schema
class ScanDone(dj.Computed):
definition = """ # scans that are fully processed (updated every time a field is added)
-> ScanInfo
-> shared.SegmentationMethod
-> shared.SpikeMethod
"""
@property
def key_source(self):
return Activity() & {'pipe_version': CURRENT_VERSION}
@property
def target(self):
return ScanDone.Partial() # trigger make_tuples for fields in Activity that aren't in ScanDone.Partial
def _job_key(self, key):
# Force reservation key to be per scan so diff fields are not run in parallel
return {k: v for k, v in key.items() if k not in ['field', 'channel']}
class Partial(dj.Part):
definition = """ # fields that have been processed in the current scan
-> ScanDone
-> Activity
"""
def make(self, key):
scan_key = {k: v for k, v in key.items() if k in self.heading}
# Delete current ScanDone entry
with dj.config(safemode=False):
(ScanDone() & scan_key).delete()
# Reinsert in ScanDone
self.insert1(scan_key)
# Insert all processed fields in Partial
ScanDone.Partial().insert((Activity() & scan_key).proj())
from . import stack
@schema
class StackCoordinates(dj.Computed):
definition = """ # centroids of each unit in motor/stack coordinate system
-> ScanSet # animal_id, session, scan_idx, channel, field, segmentation_method, pipe_version
-> stack.Registration.proj(session='scan_session') # animal_id, stack_session, stack_idx, volume_id, session, scan_idx, field, stack_channel, scan_channel, registration_method
"""
class UnitInfo(dj.Part):
definition = """ # ScanSet.UnitInfo centroids mapped to stack coordinates
-> master # this will add field and channels back
-> ScanSet.Unit
---
stack_x : float
stack_y : float
stack_z : float
"""
def make(self, key):
from scipy import ndimage
# Get registration grid (px -> stack_coordinate)
stack_key = {**key, 'scan_session': key['session']}
field_res = (ScanInfo & key).microns_per_pixel
grid = (stack.Registration & stack_key).get_grid(type='affine',
desired_res=field_res)
self.insert1(key)
field_units = ScanSet.UnitInfo & (ScanSet.Unit & key)
for unit_key, px_x, px_y in zip(*field_units.fetch('KEY', 'px_x', 'px_y')):
px_coords = np.array([[px_y], [px_x]])
unit_x, unit_y, unit_z = [ndimage.map_coordinates(grid[..., i], px_coords,
order=1)[0] for i in
range(3)]
StackCoordinates.UnitInfo.insert1({**key, **unit_key, 'stack_x': unit_x,
'stack_y': unit_y, 'stack_z': unit_z})
@schema
class Func2StructMatching(dj.Computed):
definition = """ # match functional masks to structural masks
-> ScanSet # animal_id, session, scan_idx, pipe_version, field, channel
-> stack.FieldSegmentation.proj(session='scan_session') # animal_id, stack_session, stack_idx, volume_id, session, scan_idx, field, stack_channel, scan_channel, registration_method, stacksegm_channel, stacksegm_method
---
key_hash : varchar(32) # single attribute representation of the key (used to avoid going over 16 attributes in the key)
"""
class AllMatches(dj.Part):
definition = """ # store all possible matches (one functional cell could match with more than one structural mask and vice versa)
key_hash : varchar(32) # master key
unit_id : int # functional unit id
sunit_id : int # structural unit id
---
iou : float # intersection-over-union of the 2-d masks
"""
# Used key_hash because key using ScanSet.Unit, FieldSegmentation.StackUnit has
# more than 16 attributes and MySQL complains. I added the foreign key constraints
# manually
class Match(dj.Part):
definition = """ # match of a functional mask to a structural mask (1:1 relation)
-> master
-> ScanSet.Unit
---
-> stack.FieldSegmentation.StackUnit.proj(session='scan_session')
iou : float # Intersection-over-Union of the 2-d masks
distance2d : float # distance between centroid of 2-d masks
distance3d : float # distance between functional centroid and structural centroid
"""
def make(self, key):
from .utils import registration
from scipy import ndimage
# Get caiman masks and resize them
field_dims = (ScanInfo & key).fetch1('um_height', 'um_width')
masks = np.moveaxis((Segmentation & key).get_all_masks(), -1, 0)
masks = np.stack([registration.resize(m, field_dims, desired_res=1) for m in
masks])
scansetunit_keys = (ScanSet.Unit & key).fetch('KEY', order_by='mask_id')
# Binarize masks
binary_masks = np.zeros(masks.shape, dtype=bool)
for i, mask in enumerate(masks):
## Compute cumulative mass (similar to caiman)
indices = np.unravel_index(np.flip(np.argsort(mask, axis=None), axis=0),
mask.shape) # max to min value in mask
cumsum_mask = np.cumsum(mask[indices] ** 2) / np.sum(mask ** 2)# + 1e-9)
binary_masks[i][indices] = cumsum_mask < 0.9
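# i.e. keep the brightest pixels that jointly account for 90% of the mask's squared weight mass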
# Get structural segmentation and registration grid
stack_key = {**key, 'scan_session': key['session']}
segmented_field = (stack.FieldSegmentation & stack_key).fetch1('segm_field')
grid = (stack.Registration & stack_key).get_grid(type='affine', desired_res=1)
sunit_ids = (stack.FieldSegmentation.StackUnit & stack_key).fetch('sunit_id',
order_by='sunit_id')
# Create matrix with IOU values (rows for structural units, columns for functional units)
ious = []
for sunit_id in sunit_ids:
binary_sunit = segmented_field == sunit_id
intersection = np.logical_and(binary_masks, binary_sunit).sum(axis=(1, 2)) # num_masks
union = np.logical_or(binary_masks, binary_sunit).sum(axis=(1, 2)) # num_masks
ious.append(intersection / union)
iou_matrix = np.stack(ious)
# Save all possible matches (i.e., all pairs with iou_matrix > 0)
self.insert1({**key, 'key_hash': key_hash(key)})
for mask_idx, func_idx in zip(*np.nonzero(iou_matrix)):
self.AllMatches.insert1({'key_hash': key_hash(key),
'unit_id': scansetunit_keys[func_idx]['unit_id'],
'sunit_id': sunit_ids[mask_idx],
'iou': iou_matrix[mask_idx, func_idx]})
# Iterate over matches (from best to worst), insert
while iou_matrix.max() > 0:
# Get next best
best_mask, best_func = np.unravel_index(np.argmax(iou_matrix),
iou_matrix.shape)
best_iou = iou_matrix[best_mask, best_func]
# Get stack unit coordinates
coords = (stack.FieldSegmentation.StackUnit & stack_key &
{'sunit_id': sunit_ids[best_mask]}).fetch1('sunit_z', 'sunit_y',
'sunit_x', 'mask_z',
'mask_y', 'mask_x')
sunit_z, sunit_y, sunit_x, mask_z, mask_y, mask_x = coords
# Compute distance to 2-d and 3-d mask
px_y, px_x = ndimage.measurements.center_of_mass(binary_masks[best_func])
px_coords = np.array([[px_y], [px_x]])
func_x, func_y, func_z = [ndimage.map_coordinates(grid[..., i], px_coords,
order=1)[0] for i in
range(3)]
distance2d = np.sqrt((func_z - mask_z) ** 2 + (func_y - mask_y) ** 2 +
(func_x - mask_x) ** 2)
distance3d = np.sqrt((func_z - sunit_z) ** 2 + (func_y - sunit_y) ** 2 +
(func_x - sunit_x) ** 2)
self.Match.insert1({**key, **scansetunit_keys[best_func],
'sunit_id': sunit_ids[best_mask], 'iou': best_iou,
'distance2d': distance2d, 'distance3d': distance3d})
# Deactivate match
iou_matrix[best_mask, :] = 0
iou_matrix[:, best_func] = 0
|
lgpl-3.0
|
hpparvi/PyTransit
|
pytransit/lpf/tesslpf.py
|
1
|
8500
|
# PyTransit: fast and easy exoplanet transit modelling in Python.
# Copyright (C) 2010-2019 Hannu Parviainen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from typing import Optional, Union, List
import astropy.units as u
from pathlib import Path
from astropy.stats import sigma_clipped_stats
from astropy.table import Table
from astropy.time import Time
from astropy.timeseries import TimeSeries
from corner import corner
from matplotlib.pyplot import setp
from matplotlib.pyplot import subplots
from numba import njit
from numpy import zeros, squeeze, ceil, arange, digitize, full, nan, \
sqrt, percentile, isfinite, floor, argsort, ones_like, atleast_2d, median, ndarray, unique, nanmedian, concatenate
from numpy.random import permutation
from pytransit.utils.tess import read_tess_spoc
from .loglikelihood import CeleriteLogLikelihood
from .lpf import BaseLPF
from .. import TransitModel
from ..orbits import epoch
from ..utils.keplerlc import KeplerLC
from ..utils.misc import fold
try:
from ldtk import tess
with_ldtk = True
except ImportError:
with_ldtk = False
@njit
def downsample_time(time, vals, inttime=1.):
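# Bin a light curve into inttime-wide bins (same units as `time`) and return the per-bin
# mean time, mean value, and standard error of the mean; bins containing only one or two
# samples are assigned a NaN error and filtered out before returning.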
duration = time.max() - time.min()
nbins = int(ceil(duration / inttime))
bins = arange(nbins)
edges = time[0] + bins * inttime
bids = digitize(time, edges) - 1
bt, bv, be = full(nbins, nan), zeros(nbins), zeros(nbins)
for i, bid in enumerate(bins):
bmask = bid == bids
if bmask.sum() > 0:
bt[i] = time[bmask].mean()
bv[i] = vals[bmask].mean()
if bmask.sum() > 2:
be[i] = vals[bmask].std() / sqrt(bmask.sum())
else:
be[i] = nan
m = isfinite(be)
return bt[m], bv[m], be[m]
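# Illustrative call (variable names are hypothetical): for a TESS light curve with time in days,
#   bt, bf, be = downsample_time(time_d, flux, inttime=10 / 60 / 24)  # ~10-minute bins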
class TESSLPF(BaseLPF):
bjdrefi = 2457000
def __init__(self, name: str, datadir: Path = None, tic: int = None, zero_epoch: float = None, period: float = None,
nsamples: int = 2, trdur: float = 0.125, bldur: float = 0.3, use_pdc=True,
sectors: Optional[Union[List[int], str]] = 'all',
split_transits=True, separate_noise=False, tm: TransitModel = None, minpt=10):
times, fluxes, sectors, wns = read_tess_spoc(tic, datadir, sectors=sectors, use_pdc=use_pdc)
self.lc = KeplerLC(times, fluxes, sectors, zero_epoch, period, trdur, bldur)
if split_transits:
times = self.lc.time_per_transit
fluxes = self.lc.normalized_flux_per_transit
else:
times = concatenate(self.lc.time_per_transit)
fluxes = concatenate(self.lc.normalized_flux_per_transit)
tref = floor(concatenate(times).min())
self.zero_epoch = zero_epoch
self.period = period
self.transit_duration = trdur
self.baseline_duration = bldur
wnids = arange(len(times)) if separate_noise else None
BaseLPF.__init__(self, name, ['TESS'], times=times, fluxes=fluxes,
nsamples=nsamples, exptimes=0.00139, wnids=wnids, tref=tref, tm=tm)
self.tm.interpolate = False
def _init_lnlikelihood(self):
self._add_lnlikelihood_model(CeleriteLogLikelihood(self))
def add_ldtk_prior(self, teff, logg, z):
if with_ldtk:
super().add_ldtk_prior(teff, logg, z, passbands=(tess,))
else:
raise ImportError('Could not import LDTk, cannot add an LDTk prior.')
def plot_individual_transits(self, solution: str = 'de', pv: ndarray = None, ncols: int = 2, n_samples: int = 100,
xlim: tuple = None, ylim: tuple = None, axs=None, figsize: tuple = None,
remove_baseline: bool = False):
solution = solution.lower()
samples = None
if pv is None:
if solution == 'local':
pv = self._local_minimization.x
elif solution in ('de', 'global'):
solution = 'global'
pv = self.de.minimum_location
elif solution in ('mcmc', 'mc'):
solution = 'mcmc'
samples = self.posterior_samples(derived_parameters=False)
samples = permutation(samples.values)[:n_samples]
pv = median(samples, 0)
else:
raise NotImplementedError("'solution' should be either 'local', 'global', or 'mcmc'")
t0 = floor(self.times[0].min())
nrows = int(ceil(self.nlc / ncols))
if axs is None:
fig, axs = subplots(nrows, ncols, figsize=figsize, sharey=True, constrained_layout=True)
else:
fig, axs = None, axs
[ax.autoscale(enable=True, axis='x', tight=True) for ax in axs.flat]
def baseline(pvp):
pvp = atleast_2d(pvp)
bl = zeros((pvp.shape[0], self.ofluxa.size))
for i, pv in enumerate(pvp):
bl[i] = self._lnlikelihood_models[0].predict_baseline(pv)
return bl
if remove_baseline:
if solution == 'mcmc':
fbasel = median(baseline(samples), axis=0)
fmodel, fmodm, fmodp = percentile(self.transit_model(samples), [50, 0.5, 99.5], axis=0)
else:
fbasel = squeeze(baseline(pv))
fmodel, fmodm, fmodp = squeeze(self.transit_model(pv)), None, None
fobs = self.ofluxa / fbasel
else:
if solution == 'mcmc':
fbasel = median(baseline(samples), axis=0)
fmodel, fmodm, fmodp = percentile(self.flux_model(samples), [50, 1, 99], axis=0)
else:
fbasel = squeeze(baseline(pv))
fmodel, fmodm, fmodp = squeeze(self.flux_model(pv)), None, None
fobs = self.ofluxa
t0, p = pv[[0, 1]]
for i, sl in enumerate(self.lcslices):
ax = axs.flat[i]
t = self.times[i]
e = epoch(t.mean(), t0, p)
tc = t0 + e * p
tt = 24 * (t - tc)
ax.plot(tt, fobs[sl], 'k.', alpha=0.2)
ax.plot(tt, fmodel[sl], 'k')
if solution == 'mcmc':
ax.fill_between(tt, fmodm[sl], fmodp[sl], zorder=-100, alpha=0.2, fc='k')
if not remove_baseline:
ax.plot(tt, fbasel[sl], 'k--', alpha=0.2)
setp(axs, xlim=xlim, ylim=ylim)
setp(axs[-1, :], xlabel='Time - T$_c$ [h]')
setp(axs[:, 0], ylabel='Normalised flux')
return fig
def plot_folded_transit(self, method='de', figsize=(13, 6), ylim=(0.9975, 1.002), xlim=None, binwidth=8,
remove_baseline: bool = False):
if method == 'de':
pv = self.de.minimum_location
tc, p = pv[[0, 1]]
else:
raise NotImplementedError
phase = p * fold(self.timea, p, tc, 0.5)
binwidth = binwidth / 24 / 60
sids = argsort(phase)
tm = self.transit_model(pv)
if remove_baseline:
gp = self._lnlikelihood_models[0]
bl = squeeze(gp.predict_baseline(pv))
else:
bl = ones_like(self.ofluxa)
bp, bfo, beo = downsample_time(phase[sids], (self.ofluxa / bl)[sids], binwidth)
fig, ax = subplots(figsize=figsize)
ax.plot(phase - 0.5 * p, self.ofluxa / bl, '.', alpha=0.15)
ax.errorbar(bp - 0.5 * p, bfo, beo, fmt='ko')
ax.plot(phase[sids] - 0.5 * p, tm[sids], 'k')
xlim = xlim if xlim is not None else 1.01 * (bp[isfinite(bp)][[0, -1]] - 0.5 * p)
setp(ax, ylim=ylim, xlim=xlim, xlabel='Time - Tc [d]', ylabel='Normalised flux')
fig.tight_layout()
return fig
def plot_basic_posteriors(self):
df = self.posterior_samples()
corner(df['tc p rho b k'.split()],
labels='Zero epoch, Period, Stellar density, impact parameter, radius ratio'.split(', '))
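# Illustrative usage sketch (values are hypothetical and the optimisation/MCMC calls assume
# the standard BaseLPF interface; adapt to the data at hand):
#   lpf = TESSLPF('example', tic=123456789, zero_epoch=t0, period=p, use_pdc=True)
#   (t0, p: known ephemeris in the light-curve time system)
#   lpf.optimize_global(niter=200)        # differential-evolution optimisation
#   lpf.sample_mcmc(niter=2000, thin=10)  # posterior sampling
#   fig = lpf.plot_folded_transit()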
|
gpl-2.0
|
harshaneelhg/scikit-learn
|
examples/cluster/plot_dbscan.py
|
346
|
2479
|
# -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black is removed from the colors and used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
bsd-3-clause
|
yunque/sms-tools
|
lectures/07-Sinusoidal-plus-residual-model/plots-code/stochasticSynthesisFrame.py
|
24
|
2966
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
def stochasticModelFrame(x, w, N, stocf) :
# x: input array sound, w: analysis window, N: FFT size,
# stocf: decimation factor of mag spectrum for stochastic analysis
hN = N/2+1 # size of positive spectrum
hM = (w.size)/2 # half analysis window size
pin = hM # initialize sound pointer in middle of analysis window
fftbuffer = np.zeros(N) # initialize buffer for FFT
yw = np.zeros(w.size) # initialize output sound frame
w = w / sum(w) # normalize analysis window
#-----analysis-----
xw = x[pin-hM:pin+hM] * w # window the input sound
X = fft(xw) # compute FFT
mX = 20 * np.log10( abs(X[:hN]) ) # magnitude spectrum of positive frequencies
mXenv = resample(np.maximum(-200, mX), mX.size*stocf) # decimate the mag spectrum
pX = np.angle(X[:hN])
#-----synthesis-----
mY = resample(mXenv, hN) # interpolate to original size
pY = 2*np.pi*np.random.rand(hN) # generate phase random values
Y = np.zeros(N, dtype = complex)
Y[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq.
Y[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq.
fftbuffer = np.real( ifft(Y) ) # inverse FFT
y = fftbuffer*N/2
return mX, pX, mY, pY, y
# example call of stochasticModel function
if __name__ == '__main__':
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
w = np.hanning(1024)
N = 1024
stocf = 0.1
maxFreq = 10000.0
lastbin = N*maxFreq/fs
first = 1000
last = first+w.size
mX, pX, mY, pY, y = stochasticModelFrame(x[first:last], w, N, stocf)
plt.figure(1, figsize=(9, 5))
plt.subplot(3,1,1)
plt.plot(float(fs)*np.arange(mY.size)/N, mY, 'r', lw=1.5, label="mY")
plt.axis([0, maxFreq, -78, max(mX)+0.5])
plt.title('mY (stochastic approximation of mX)')
plt.subplot(3,1,2)
plt.plot(float(fs)*np.arange(pY.size)/N, pY-np.pi, 'c', lw=1.5, label="pY")
plt.axis([0, maxFreq, -np.pi, np.pi])
plt.title('pY (random phases)')
plt.subplot(3,1,3)
plt.plot(np.arange(first, last)/float(fs), y, 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(y), max(y)])
plt.title('yst')
plt.tight_layout()
plt.savefig('stochasticSynthesisFrame.png')
plt.show()
|
agpl-3.0
|
MoonRaker/pvlib-python
|
pvlib/spa.py
|
3
|
42425
|
"""
Calculate the solar position using the NREL SPA algorithm either using
numpy arrays or compiling the code to machine language with numba.
"""
# Contributors:
# Created by Tony Lorenzo (@alorenzo175), Univ. of Arizona, 2015
from __future__ import division
import os
import threading
import warnings
import logging
pvl_logger = logging.getLogger('pvlib')
import numpy as np
# this block is a way to use an environment variable to switch between
# compiling the functions with numba or just use numpy
def nocompile(*args, **kwargs):
return lambda func: func
if os.getenv('PVLIB_USE_NUMBA', '0') != '0':
try:
from numba import jit, __version__
except ImportError:
warnings.warn('Could not import numba, falling back to numpy ' +
'calculation')
jcompile = nocompile
USE_NUMBA = False
else:
major, minor = __version__.split('.')[:2]
if int(major + minor) >= 17:
# need at least numba >= 0.17.0
jcompile = jit
USE_NUMBA = True
else:
warnings.warn('Numba version must be >= 0.17.0, falling back to ' +
'numpy')
jcompile = nocompile
USE_NUMBA = False
else:
jcompile = nocompile
USE_NUMBA = False
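# Usage note (not part of the original module): the numba path is selected
# through the environment before this module is first imported, e.g. from a
# shell
#     PVLIB_USE_NUMBA=1 python your_script.py
# or in Python
#     import os; os.environ['PVLIB_USE_NUMBA'] = '1'
# Any value other than '0' enables the numba-compiled functions when
# numba >= 0.17 is available; otherwise the plain numpy implementations are used.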
TABLE_1_DICT = {
'L0': np.array(
[[175347046.0, 0.0, 0.0],
[3341656.0, 4.6692568, 6283.07585],
[34894.0, 4.6261, 12566.1517],
[3497.0, 2.7441, 5753.3849],
[3418.0, 2.8289, 3.5231],
[3136.0, 3.6277, 77713.7715],
[2676.0, 4.4181, 7860.4194],
[2343.0, 6.1352, 3930.2097],
[1324.0, 0.7425, 11506.7698],
[1273.0, 2.0371, 529.691],
[1199.0, 1.1096, 1577.3435],
[990.0, 5.233, 5884.927],
[902.0, 2.045, 26.298],
[857.0, 3.508, 398.149],
[780.0, 1.179, 5223.694],
[753.0, 2.533, 5507.553],
[505.0, 4.583, 18849.228],
[492.0, 4.205, 775.523],
[357.0, 2.92, 0.067],
[317.0, 5.849, 11790.629],
[284.0, 1.899, 796.298],
[271.0, 0.315, 10977.079],
[243.0, 0.345, 5486.778],
[206.0, 4.806, 2544.314],
[205.0, 1.869, 5573.143],
[202.0, 2.458, 6069.777],
[156.0, 0.833, 213.299],
[132.0, 3.411, 2942.463],
[126.0, 1.083, 20.775],
[115.0, 0.645, 0.98],
[103.0, 0.636, 4694.003],
[102.0, 0.976, 15720.839],
[102.0, 4.267, 7.114],
[99.0, 6.21, 2146.17],
[98.0, 0.68, 155.42],
[86.0, 5.98, 161000.69],
[85.0, 1.3, 6275.96],
[85.0, 3.67, 71430.7],
[80.0, 1.81, 17260.15],
[79.0, 3.04, 12036.46],
[75.0, 1.76, 5088.63],
[74.0, 3.5, 3154.69],
[74.0, 4.68, 801.82],
[70.0, 0.83, 9437.76],
[62.0, 3.98, 8827.39],
[61.0, 1.82, 7084.9],
[57.0, 2.78, 6286.6],
[56.0, 4.39, 14143.5],
[56.0, 3.47, 6279.55],
[52.0, 0.19, 12139.55],
[52.0, 1.33, 1748.02],
[51.0, 0.28, 5856.48],
[49.0, 0.49, 1194.45],
[41.0, 5.37, 8429.24],
[41.0, 2.4, 19651.05],
[39.0, 6.17, 10447.39],
[37.0, 6.04, 10213.29],
[37.0, 2.57, 1059.38],
[36.0, 1.71, 2352.87],
[36.0, 1.78, 6812.77],
[33.0, 0.59, 17789.85],
[30.0, 0.44, 83996.85],
[30.0, 2.74, 1349.87],
[25.0, 3.16, 4690.48]]),
'L1': np.array(
[[628331966747.0, 0.0, 0.0],
[206059.0, 2.678235, 6283.07585],
[4303.0, 2.6351, 12566.1517],
[425.0, 1.59, 3.523],
[119.0, 5.796, 26.298],
[109.0, 2.966, 1577.344],
[93.0, 2.59, 18849.23],
[72.0, 1.14, 529.69],
[68.0, 1.87, 398.15],
[67.0, 4.41, 5507.55],
[59.0, 2.89, 5223.69],
[56.0, 2.17, 155.42],
[45.0, 0.4, 796.3],
[36.0, 0.47, 775.52],
[29.0, 2.65, 7.11],
[21.0, 5.34, 0.98],
[19.0, 1.85, 5486.78],
[19.0, 4.97, 213.3],
[17.0, 2.99, 6275.96],
[16.0, 0.03, 2544.31],
[16.0, 1.43, 2146.17],
[15.0, 1.21, 10977.08],
[12.0, 2.83, 1748.02],
[12.0, 3.26, 5088.63],
[12.0, 5.27, 1194.45],
[12.0, 2.08, 4694.0],
[11.0, 0.77, 553.57],
[10.0, 1.3, 6286.6],
[10.0, 4.24, 1349.87],
[9.0, 2.7, 242.73],
[9.0, 5.64, 951.72],
[8.0, 5.3, 2352.87],
[6.0, 2.65, 9437.76],
[6.0, 4.67, 4690.48]]),
'L2': np.array(
[[52919.0, 0.0, 0.0],
[8720.0, 1.0721, 6283.0758],
[309.0, 0.867, 12566.152],
[27.0, 0.05, 3.52],
[16.0, 5.19, 26.3],
[16.0, 3.68, 155.42],
[10.0, 0.76, 18849.23],
[9.0, 2.06, 77713.77],
[7.0, 0.83, 775.52],
[5.0, 4.66, 1577.34],
[4.0, 1.03, 7.11],
[4.0, 3.44, 5573.14],
[3.0, 5.14, 796.3],
[3.0, 6.05, 5507.55],
[3.0, 1.19, 242.73],
[3.0, 6.12, 529.69],
[3.0, 0.31, 398.15],
[3.0, 2.28, 553.57],
[2.0, 4.38, 5223.69],
[2.0, 3.75, 0.98]]),
'L3': np.array(
[[289.0, 5.844, 6283.076],
[35.0, 0.0, 0.0],
[17.0, 5.49, 12566.15],
[3.0, 5.2, 155.42],
[1.0, 4.72, 3.52],
[1.0, 5.3, 18849.23],
[1.0, 5.97, 242.73]]),
'L4': np.array(
[[114.0, 3.142, 0.0],
[8.0, 4.13, 6283.08],
[1.0, 3.84, 12566.15]]),
'L5': np.array(
[[1.0, 3.14, 0.0]]),
'B0': np.array(
[[280.0, 3.199, 84334.662],
[102.0, 5.422, 5507.553],
[80.0, 3.88, 5223.69],
[44.0, 3.7, 2352.87],
[32.0, 4.0, 1577.34]]),
'B1': np.array(
[[9.0, 3.9, 5507.55],
[6.0, 1.73, 5223.69]]),
'R0': np.array(
[[100013989.0, 0.0, 0.0],
[1670700.0, 3.0984635, 6283.07585],
[13956.0, 3.05525, 12566.1517],
[3084.0, 5.1985, 77713.7715],
[1628.0, 1.1739, 5753.3849],
[1576.0, 2.8469, 7860.4194],
[925.0, 5.453, 11506.77],
[542.0, 4.564, 3930.21],
[472.0, 3.661, 5884.927],
[346.0, 0.964, 5507.553],
[329.0, 5.9, 5223.694],
[307.0, 0.299, 5573.143],
[243.0, 4.273, 11790.629],
[212.0, 5.847, 1577.344],
[186.0, 5.022, 10977.079],
[175.0, 3.012, 18849.228],
[110.0, 5.055, 5486.778],
[98.0, 0.89, 6069.78],
[86.0, 5.69, 15720.84],
[86.0, 1.27, 161000.69],
[65.0, 0.27, 17260.15],
[63.0, 0.92, 529.69],
[57.0, 2.01, 83996.85],
[56.0, 5.24, 71430.7],
[49.0, 3.25, 2544.31],
[47.0, 2.58, 775.52],
[45.0, 5.54, 9437.76],
[43.0, 6.01, 6275.96],
[39.0, 5.36, 4694.0],
[38.0, 2.39, 8827.39],
[37.0, 0.83, 19651.05],
[37.0, 4.9, 12139.55],
[36.0, 1.67, 12036.46],
[35.0, 1.84, 2942.46],
[33.0, 0.24, 7084.9],
[32.0, 0.18, 5088.63],
[32.0, 1.78, 398.15],
[28.0, 1.21, 6286.6],
[28.0, 1.9, 6279.55],
[26.0, 4.59, 10447.39]]),
'R1': np.array(
[[103019.0, 1.10749, 6283.07585],
[1721.0, 1.0644, 12566.1517],
[702.0, 3.142, 0.0],
[32.0, 1.02, 18849.23],
[31.0, 2.84, 5507.55],
[25.0, 1.32, 5223.69],
[18.0, 1.42, 1577.34],
[10.0, 5.91, 10977.08],
[9.0, 1.42, 6275.96],
[9.0, 0.27, 5486.78]]),
'R2': np.array(
[[4359.0, 5.7846, 6283.0758],
[124.0, 5.579, 12566.152],
[12.0, 3.14, 0.0],
[9.0, 3.63, 77713.77],
[6.0, 1.87, 5573.14],
[3.0, 5.47, 18849.23]]),
'R3': np.array(
[[145.0, 4.273, 6283.076],
[7.0, 3.92, 12566.15]]),
'R4': np.array(
[[4.0, 2.56, 6283.08]])
}
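# The shorter periodic-term tables above are zero-padded below (ndarray.resize
# fills the new rows with zeros) so that the L, B and R term lists can be
# stacked into single 3-D arrays; the padded rows contribute nothing to the
# trigonometric sums evaluated later.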
TABLE_1_DICT['L1'].resize((64, 3))
TABLE_1_DICT['L2'].resize((64, 3))
TABLE_1_DICT['L3'].resize((64, 3))
TABLE_1_DICT['L4'].resize((64, 3))
TABLE_1_DICT['L5'].resize((64, 3))
TABLE_1_DICT['B1'].resize((5, 3))
TABLE_1_DICT['R1'].resize((40, 3))
TABLE_1_DICT['R2'].resize((40, 3))
TABLE_1_DICT['R3'].resize((40, 3))
TABLE_1_DICT['R4'].resize((40, 3))
HELIO_LONG_TABLE = np.array([TABLE_1_DICT['L0'],
TABLE_1_DICT['L1'],
TABLE_1_DICT['L2'],
TABLE_1_DICT['L3'],
TABLE_1_DICT['L4'],
TABLE_1_DICT['L5']])
HELIO_LAT_TABLE = np.array([TABLE_1_DICT['B0'],
TABLE_1_DICT['B1']])
HELIO_RADIUS_TABLE = np.array([TABLE_1_DICT['R0'],
TABLE_1_DICT['R1'],
TABLE_1_DICT['R2'],
TABLE_1_DICT['R3'],
TABLE_1_DICT['R4']])
NUTATION_ABCD_ARRAY = np.array([
[-171996, -174.2, 92025, 8.9],
[-13187, -1.6, 5736, -3.1],
[-2274, -0.2, 977, -0.5],
[2062, 0.2, -895, 0.5],
[1426, -3.4, 54, -0.1],
[712, 0.1, -7, 0],
[-517, 1.2, 224, -0.6],
[-386, -0.4, 200, 0],
[-301, 0, 129, -0.1],
[217, -0.5, -95, 0.3],
[-158, 0, 0, 0],
[129, 0.1, -70, 0],
[123, 0, -53, 0],
[63, 0, 0, 0],
[63, 0.1, -33, 0],
[-59, 0, 26, 0],
[-58, -0.1, 32, 0],
[-51, 0, 27, 0],
[48, 0, 0, 0],
[46, 0, -24, 0],
[-38, 0, 16, 0],
[-31, 0, 13, 0],
[29, 0, 0, 0],
[29, 0, -12, 0],
[26, 0, 0, 0],
[-22, 0, 0, 0],
[21, 0, -10, 0],
[17, -0.1, 0, 0],
[16, 0, -8, 0],
[-16, 0.1, 7, 0],
[-15, 0, 9, 0],
[-13, 0, 7, 0],
[-12, 0, 6, 0],
[11, 0, 0, 0],
[-10, 0, 5, 0],
[-8, 0, 3, 0],
[7, 0, -3, 0],
[-7, 0, 0, 0],
[-7, 0, 3, 0],
[-7, 0, 3, 0],
[6, 0, 0, 0],
[6, 0, -3, 0],
[6, 0, -3, 0],
[-6, 0, 3, 0],
[-6, 0, 3, 0],
[5, 0, 0, 0],
[-5, 0, 3, 0],
[-5, 0, 3, 0],
[-5, 0, 3, 0],
[4, 0, 0, 0],
[4, 0, 0, 0],
[4, 0, 0, 0],
[-4, 0, 0, 0],
[-4, 0, 0, 0],
[-4, 0, 0, 0],
[3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
])
NUTATION_YTERM_ARRAY = np.array([
[0, 0, 0, 0, 1],
[-2, 0, 0, 2, 2],
[0, 0, 0, 2, 2],
[0, 0, 0, 0, 2],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[-2, 1, 0, 2, 2],
[0, 0, 0, 2, 1],
[0, 0, 1, 2, 2],
[-2, -1, 0, 2, 2],
[-2, 0, 1, 0, 0],
[-2, 0, 0, 2, 1],
[0, 0, -1, 2, 2],
[2, 0, 0, 0, 0],
[0, 0, 1, 0, 1],
[2, 0, -1, 2, 2],
[0, 0, -1, 0, 1],
[0, 0, 1, 2, 1],
[-2, 0, 2, 0, 0],
[0, 0, -2, 2, 1],
[2, 0, 0, 2, 2],
[0, 0, 2, 2, 2],
[0, 0, 2, 0, 0],
[-2, 0, 1, 2, 2],
[0, 0, 0, 2, 0],
[-2, 0, 0, 2, 0],
[0, 0, -1, 2, 1],
[0, 2, 0, 0, 0],
[2, 0, -1, 0, 1],
[-2, 2, 0, 2, 2],
[0, 1, 0, 0, 1],
[-2, 0, 1, 0, 1],
[0, -1, 0, 0, 1],
[0, 0, 2, -2, 0],
[2, 0, -1, 2, 1],
[2, 0, 1, 2, 2],
[0, 1, 0, 2, 2],
[-2, 1, 1, 0, 0],
[0, -1, 0, 2, 2],
[2, 0, 0, 2, 1],
[2, 0, 1, 0, 0],
[-2, 0, 2, 2, 2],
[-2, 0, 1, 2, 1],
[2, 0, -2, 0, 1],
[2, 0, 0, 0, 1],
[0, -1, 1, 0, 0],
[-2, -1, 0, 2, 1],
[-2, 0, 0, 0, 1],
[0, 0, 2, 2, 1],
[-2, 0, 2, 0, 1],
[-2, 1, 0, 2, 1],
[0, 0, 1, -2, 0],
[-1, 0, 1, 0, 0],
[-2, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 1, 2, 0],
[0, 0, -2, 2, 2],
[-1, -1, 1, 0, 0],
[0, 1, 1, 0, 0],
[0, -1, 1, 2, 2],
[2, -1, -1, 2, 2],
[0, 0, 3, 2, 2],
[2, -1, 0, 2, 2],
])
@jcompile('float64(int64, int64, int64, int64, int64, int64, int64)',
nopython=True)
def julian_day_dt(year, month, day, hour, minute, second, microsecond):
"""This is the original way to calculate the julian day from the NREL paper.
However, it is much faster to convert to unix/epoch time and then convert
to julian day. Note that the date must be UTC."""
if month <= 2:
year = year-1
month = month+12
a = int(year/100)
b = 2 - a + int(a * 0.25)
frac_of_day = (microsecond + (second + minute * 60 + hour * 3600)
) * 1.0 / (3600*24)
d = day + frac_of_day
jd = (int(365.25 * (year + 4716)) + int(30.6001 * (month + 1)) + d +
b - 1524.5)
return jd
@jcompile('float64(float64)', nopython=True)
def julian_day(unixtime):
jd = unixtime * 1.0 / 86400 + 2440587.5
return jd
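# Worked example (illustrative, not part of the original module): both routes
# give the same Julian Day for 2004-01-01 12:00:00 UTC,
#   julian_day_dt(2004, 1, 1, 12, 0, 0, 0) -> 2453006.0
#   julian_day(1072958400.0)               -> 1072958400 / 86400 + 2440587.5 = 2453006.0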
@jcompile('float64(float64, float64)', nopython=True)
def julian_ephemeris_day(julian_day, delta_t):
jde = julian_day + delta_t * 1.0 / 86400
return jde
@jcompile('float64(float64)', nopython=True)
def julian_century(julian_day):
jc = (julian_day - 2451545) * 1.0 / 36525
return jc
@jcompile('float64(float64)', nopython=True)
def julian_ephemeris_century(julian_ephemeris_day):
jce = (julian_ephemeris_day - 2451545) * 1.0 / 36525
return jce
@jcompile('float64(float64)', nopython=True)
def julian_ephemeris_millennium(julian_ephemeris_century):
jme = julian_ephemeris_century * 1.0 / 10
return jme
@jcompile('float64(float64)', nopython=True)
def heliocentric_longitude(jme):
l0 = 0.0
l1 = 0.0
l2 = 0.0
l3 = 0.0
l4 = 0.0
l5 = 0.0
for row in range(HELIO_LONG_TABLE.shape[1]):
l0 += (HELIO_LONG_TABLE[0, row, 0]
* np.cos(HELIO_LONG_TABLE[0, row, 1]
+ HELIO_LONG_TABLE[0, row, 2] * jme)
)
l1 += (HELIO_LONG_TABLE[1, row, 0]
* np.cos(HELIO_LONG_TABLE[1, row, 1]
+ HELIO_LONG_TABLE[1, row, 2] * jme)
)
l2 += (HELIO_LONG_TABLE[2, row, 0]
* np.cos(HELIO_LONG_TABLE[2, row, 1]
+ HELIO_LONG_TABLE[2, row, 2] * jme)
)
l3 += (HELIO_LONG_TABLE[3, row, 0]
* np.cos(HELIO_LONG_TABLE[3, row, 1]
+ HELIO_LONG_TABLE[3, row, 2] * jme)
)
l4 += (HELIO_LONG_TABLE[4, row, 0]
* np.cos(HELIO_LONG_TABLE[4, row, 1]
+ HELIO_LONG_TABLE[4, row, 2] * jme)
)
l5 += (HELIO_LONG_TABLE[5, row, 0]
* np.cos(HELIO_LONG_TABLE[5, row, 1]
+ HELIO_LONG_TABLE[5, row, 2] * jme)
)
l_rad = (l0 + l1 * jme + l2 * jme**2 + l3 * jme**3 + l4 * jme**4 +
l5 * jme**5)/10**8
l = np.rad2deg(l_rad)
return l % 360
@jcompile('float64(float64)', nopython=True)
def heliocentric_latitude(jme):
b0 = 0.0
b1 = 0.0
for row in range(HELIO_LAT_TABLE.shape[1]):
b0 += (HELIO_LAT_TABLE[0, row, 0]
* np.cos(HELIO_LAT_TABLE[0, row, 1]
+ HELIO_LAT_TABLE[0, row, 2] * jme)
)
b1 += (HELIO_LAT_TABLE[1, row, 0]
* np.cos(HELIO_LAT_TABLE[1, row, 1]
+ HELIO_LAT_TABLE[1, row, 2] * jme)
)
b_rad = (b0 + b1 * jme)/10**8
b = np.rad2deg(b_rad)
return b
@jcompile('float64(float64)', nopython=True)
def heliocentric_radius_vector(jme):
r0 = 0.0
r1 = 0.0
r2 = 0.0
r3 = 0.0
r4 = 0.0
for row in range(HELIO_RADIUS_TABLE.shape[1]):
r0 += (HELIO_RADIUS_TABLE[0, row, 0]
* np.cos(HELIO_RADIUS_TABLE[0, row, 1]
+ HELIO_RADIUS_TABLE[0, row, 2] * jme)
)
r1 += (HELIO_RADIUS_TABLE[1, row, 0]
* np.cos(HELIO_RADIUS_TABLE[1, row, 1]
+ HELIO_RADIUS_TABLE[1, row, 2] * jme)
)
r2 += (HELIO_RADIUS_TABLE[2, row, 0]
* np.cos(HELIO_RADIUS_TABLE[2, row, 1]
+ HELIO_RADIUS_TABLE[2, row, 2] * jme)
)
r3 += (HELIO_RADIUS_TABLE[3, row, 0]
* np.cos(HELIO_RADIUS_TABLE[3, row, 1]
+ HELIO_RADIUS_TABLE[3, row, 2] * jme)
)
r4 += (HELIO_RADIUS_TABLE[4, row, 0]
* np.cos(HELIO_RADIUS_TABLE[4, row, 1]
+ HELIO_RADIUS_TABLE[4, row, 2] * jme)
)
r = (r0 + r1 * jme + r2 * jme**2 + r3 * jme**3 + r4 * jme**4)/10**8
return r
@jcompile('float64(float64)', nopython=True)
def geocentric_longitude(heliocentric_longitude):
theta = heliocentric_longitude + 180.0
return theta % 360
@jcompile('float64(float64)', nopython=True)
def geocentric_latitude(heliocentric_latitude):
beta = -1.0*heliocentric_latitude
return beta
@jcompile('float64(float64)', nopython=True)
def mean_elongation(julian_ephemeris_century):
x0 = (297.85036
+ 445267.111480 * julian_ephemeris_century
- 0.0019142 * julian_ephemeris_century**2
+ julian_ephemeris_century**3 / 189474)
return x0
@jcompile('float64(float64)', nopython=True)
def mean_anomaly_sun(julian_ephemeris_century):
x1 = (357.52772
+ 35999.050340 * julian_ephemeris_century
- 0.0001603 * julian_ephemeris_century**2
- julian_ephemeris_century**3 / 300000)
return x1
@jcompile('float64(float64)', nopython=True)
def mean_anomaly_moon(julian_ephemeris_century):
x2 = (134.96298
+ 477198.867398 * julian_ephemeris_century
+ 0.0086972 * julian_ephemeris_century**2
+ julian_ephemeris_century**3 / 56250)
return x2
@jcompile('float64(float64)', nopython=True)
def moon_argument_latitude(julian_ephemeris_century):
x3 = (93.27191
+ 483202.017538 * julian_ephemeris_century
- 0.0036825 * julian_ephemeris_century**2
+ julian_ephemeris_century**3 / 327270)
return x3
@jcompile('float64(float64)', nopython=True)
def moon_ascending_longitude(julian_ephemeris_century):
x4 = (125.04452
- 1934.136261 * julian_ephemeris_century
+ 0.0020708 * julian_ephemeris_century**2
+ julian_ephemeris_century**3 / 450000)
return x4
@jcompile('float64(float64, float64, float64, float64, float64, float64)',
nopython=True)
def longitude_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
delta_psi_sum = 0
for row in range(NUTATION_YTERM_ARRAY.shape[0]):
a = NUTATION_ABCD_ARRAY[row, 0]
b = NUTATION_ABCD_ARRAY[row, 1]
argsin = (NUTATION_YTERM_ARRAY[row, 0]*x0 +
NUTATION_YTERM_ARRAY[row, 1]*x1 +
NUTATION_YTERM_ARRAY[row, 2]*x2 +
NUTATION_YTERM_ARRAY[row, 3]*x3 +
NUTATION_YTERM_ARRAY[row, 4]*x4)
term = (a + b * julian_ephemeris_century) * np.sin(np.radians(argsin))
delta_psi_sum += term
delta_psi = delta_psi_sum*1.0/36000000
return delta_psi
@jcompile('float64(float64, float64, float64, float64, float64, float64)',
nopython=True)
def obliquity_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
delta_eps_sum = 0.0
for row in range(NUTATION_YTERM_ARRAY.shape[0]):
c = NUTATION_ABCD_ARRAY[row, 2]
d = NUTATION_ABCD_ARRAY[row, 3]
argcos = (NUTATION_YTERM_ARRAY[row, 0]*x0 +
NUTATION_YTERM_ARRAY[row, 1]*x1 +
NUTATION_YTERM_ARRAY[row, 2]*x2 +
NUTATION_YTERM_ARRAY[row, 3]*x3 +
NUTATION_YTERM_ARRAY[row, 4]*x4)
term = (c + d * julian_ephemeris_century) * np.cos(np.radians(argcos))
delta_eps_sum += term
delta_eps = delta_eps_sum*1.0/36000000
return delta_eps
@jcompile('float64(float64)', nopython=True)
def mean_ecliptic_obliquity(julian_ephemeris_millennium):
U = 1.0*julian_ephemeris_millennium/10
e0 = (84381.448 - 4680.93 * U - 1.55 * U**2
+ 1999.25 * U**3 - 51.38 * U**4 - 249.67 * U**5
- 39.05 * U**6 + 7.12 * U**7 + 27.87 * U**8
+ 5.79 * U**9 + 2.45 * U**10)
return e0
@jcompile('float64(float64, float64)', nopython=True)
def true_ecliptic_obliquity(mean_ecliptic_obliquity, obliquity_nutation):
e0 = mean_ecliptic_obliquity
deleps = obliquity_nutation
e = e0*1.0/3600 + deleps
return e
@jcompile('float64(float64)', nopython=True)
def aberration_correction(earth_radius_vector):
deltau = -20.4898 / (3600 * earth_radius_vector)
return deltau
@jcompile('float64(float64, float64, float64)', nopython=True)
def apparent_sun_longitude(geocentric_longitude, longitude_nutation,
aberration_correction):
lamd = geocentric_longitude + longitude_nutation + aberration_correction
return lamd
@jcompile('float64(float64, float64)', nopython=True)
def mean_sidereal_time(julian_day, julian_century):
v0 = (280.46061837 + 360.98564736629 * (julian_day - 2451545)
+ 0.000387933 * julian_century**2 - julian_century**3 / 38710000)
return v0 % 360.0
@jcompile('float64(float64, float64, float64)', nopython=True)
def apparent_sidereal_time(mean_sidereal_time, longitude_nutation,
true_ecliptic_obliquity):
v = mean_sidereal_time + longitude_nutation * np.cos(
np.radians(true_ecliptic_obliquity))
return v
@jcompile('float64(float64, float64, float64)', nopython=True)
def geocentric_sun_right_ascension(apparent_sun_longitude,
true_ecliptic_obliquity,
geocentric_latitude):
num = (np.sin(np.radians(apparent_sun_longitude))
* np.cos(np.radians(true_ecliptic_obliquity))
- np.tan(np.radians(geocentric_latitude))
* np.sin(np.radians(true_ecliptic_obliquity)))
alpha = np.degrees(np.arctan2(num, np.cos(
np.radians(apparent_sun_longitude))))
return alpha % 360
@jcompile('float64(float64, float64, float64)', nopython=True)
def geocentric_sun_declination(apparent_sun_longitude, true_ecliptic_obliquity,
geocentric_latitude):
delta = np.degrees(np.arcsin(np.sin(np.radians(geocentric_latitude)) *
np.cos(np.radians(true_ecliptic_obliquity)) +
np.cos(np.radians(geocentric_latitude)) *
np.sin(np.radians(true_ecliptic_obliquity)) *
np.sin(np.radians(apparent_sun_longitude))))
return delta
@jcompile('float64(float64, float64, float64)', nopython=True)
def local_hour_angle(apparent_sidereal_time, observer_longitude,
sun_right_ascension):
"""Measured westward from south"""
H = apparent_sidereal_time + observer_longitude - sun_right_ascension
return H % 360
@jcompile('float64(float64)', nopython=True)
def equatorial_horizontal_parallax(earth_radius_vector):
xi = 8.794 / (3600 * earth_radius_vector)
return xi
@jcompile('float64(float64)', nopython=True)
def uterm(observer_latitude):
u = np.arctan(0.99664719 * np.tan(np.radians(observer_latitude)))
return u
@jcompile('float64(float64, float64, float64)', nopython=True)
def xterm(u, observer_latitude, observer_elevation):
x = (np.cos(u) + observer_elevation / 6378140
* np.cos(np.radians(observer_latitude)))
return x
@jcompile('float64(float64, float64, float64)', nopython=True)
def yterm(u, observer_latitude, observer_elevation):
y = (0.99664719 * np.sin(u) + observer_elevation / 6378140
* np.sin(np.radians(observer_latitude)))
return y
@jcompile('float64(float64, float64,float64, float64)', nopython=True)
def parallax_sun_right_ascension(xterm, equatorial_horizontal_parallax,
local_hour_angle, geocentric_sun_declination):
num = (-xterm * np.sin(np.radians(equatorial_horizontal_parallax))
* np.sin(np.radians(local_hour_angle)))
denom = (np.cos(np.radians(geocentric_sun_declination))
- xterm * np.sin(np.radians(equatorial_horizontal_parallax))
* np.cos(np.radians(local_hour_angle)))
delta_alpha = np.degrees(np.arctan2(num, denom))
return delta_alpha
@jcompile('float64(float64, float64)', nopython=True)
def topocentric_sun_right_ascension(geocentric_sun_right_ascension,
parallax_sun_right_ascension):
alpha_prime = geocentric_sun_right_ascension + parallax_sun_right_ascension
return alpha_prime
@jcompile('float64(float64, float64, float64, float64, float64, float64)',
nopython=True)
def topocentric_sun_declination(geocentric_sun_declination, xterm, yterm,
equatorial_horizontal_parallax,
parallax_sun_right_ascension,
local_hour_angle):
num = ((np.sin(np.radians(geocentric_sun_declination)) - yterm
* np.sin(np.radians(equatorial_horizontal_parallax)))
* np.cos(np.radians(parallax_sun_right_ascension)))
denom = (np.cos(np.radians(geocentric_sun_declination)) - xterm
* np.sin(np.radians(equatorial_horizontal_parallax))
* np.cos(np.radians(local_hour_angle)))
delta = np.degrees(np.arctan2(num, denom))
return delta
@jcompile('float64(float64, float64)', nopython=True)
def topocentric_local_hour_angle(local_hour_angle,
parallax_sun_right_ascension):
H_prime = local_hour_angle - parallax_sun_right_ascension
return H_prime
@jcompile('float64(float64, float64, float64)', nopython=True)
def topocentric_elevation_angle_without_atmosphere(observer_latitude,
topocentric_sun_declination,
topocentric_local_hour_angle
):
e0 = np.degrees(np.arcsin(
np.sin(np.radians(observer_latitude))
* np.sin(np.radians(topocentric_sun_declination))
+ np.cos(np.radians(observer_latitude))
* np.cos(np.radians(topocentric_sun_declination))
* np.cos(np.radians(topocentric_local_hour_angle))))
return e0
@jcompile('float64(float64, float64, float64, float64)', nopython=True)
def atmospheric_refraction_correction(local_pressure, local_temp,
topocentric_elevation_angle_wo_atmosphere,
atmos_refract):
# switch sets delta_e when the sun is below the horizon
switch = topocentric_elevation_angle_wo_atmosphere >= -1.0 * (
0.26667 + atmos_refract)
delta_e = ((local_pressure / 1010.0) * (283.0 / (273 + local_temp))
* 1.02 / (60 * np.tan(np.radians(
topocentric_elevation_angle_wo_atmosphere
+ 10.3 / (topocentric_elevation_angle_wo_atmosphere
+ 5.11))))) * switch
return delta_e
@jcompile('float64(float64, float64)', nopython=True)
def topocentric_elevation_angle(topocentric_elevation_angle_without_atmosphere,
atmospheric_refraction_correction):
e = (topocentric_elevation_angle_without_atmosphere
+ atmospheric_refraction_correction)
return e
@jcompile('float64(float64)', nopython=True)
def topocentric_zenith_angle(topocentric_elevation_angle):
theta = 90 - topocentric_elevation_angle
return theta
@jcompile('float64(float64, float64, float64)', nopython=True)
def topocentric_astronomers_azimuth(topocentric_local_hour_angle,
topocentric_sun_declination,
observer_latitude):
num = np.sin(np.radians(topocentric_local_hour_angle))
denom = (np.cos(np.radians(topocentric_local_hour_angle))
* np.sin(np.radians(observer_latitude))
- np.tan(np.radians(topocentric_sun_declination))
* np.cos(np.radians(observer_latitude)))
gamma = np.degrees(np.arctan2(num, denom))
return gamma % 360
@jcompile('float64(float64)', nopython=True)
def topocentric_azimuth_angle(topocentric_astronomers_azimuth):
phi = topocentric_astronomers_azimuth + 180
return phi % 360
@jcompile('float64(float64)', nopython=True)
def sun_mean_longitude(julian_ephemeris_millennium):
M = (280.4664567 + 360007.6982779 * julian_ephemeris_millennium
+ 0.03032028 * julian_ephemeris_millennium**2
+ julian_ephemeris_millennium**3 / 49931
- julian_ephemeris_millennium**4 / 15300
- julian_ephemeris_millennium**5 / 2000000)
return M
@jcompile('float64(float64, float64, float64, float64)', nopython=True)
def equation_of_time(sun_mean_longitude, geocentric_sun_right_ascension,
longitude_nutation, true_ecliptic_obliquity):
E = (sun_mean_longitude - 0.0057183 - geocentric_sun_right_ascension +
longitude_nutation * np.cos(np.radians(true_ecliptic_obliquity)))
# limit between 0 and 360
E = E % 360
# convert to minutes
E *= 4
greater = E > 20
less = E < -20
other = (E <= 20) & (E >= -20)
E = greater * (E - 1440) + less * (E + 1440) + other * E
return E
@jcompile('void(float64[:], float64[:], float64[:,:])', nopython=True,
nogil=True)
def solar_position_loop(unixtime, loc_args, out):
"""Loop through the time array and calculate the solar position"""
lat = loc_args[0]
lon = loc_args[1]
elev = loc_args[2]
pressure = loc_args[3]
temp = loc_args[4]
delta_t = loc_args[5]
atmos_refract = loc_args[6]
sst = loc_args[7]
for i in range(unixtime.shape[0]):
utime = unixtime[i]
jd = julian_day(utime)
jde = julian_ephemeris_day(jd, delta_t)
jc = julian_century(jd)
jce = julian_ephemeris_century(jde)
jme = julian_ephemeris_millennium(jce)
L = heliocentric_longitude(jme)
B = heliocentric_latitude(jme)
R = heliocentric_radius_vector(jme)
Theta = geocentric_longitude(L)
beta = geocentric_latitude(B)
x0 = mean_elongation(jce)
x1 = mean_anomaly_sun(jce)
x2 = mean_anomaly_moon(jce)
x3 = moon_argument_latitude(jce)
x4 = moon_ascending_longitude(jce)
delta_psi = longitude_nutation(jce, x0, x1, x2, x3, x4)
delta_epsilon = obliquity_nutation(jce, x0, x1, x2, x3, x4)
epsilon0 = mean_ecliptic_obliquity(jme)
epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon)
delta_tau = aberration_correction(R)
lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau)
v0 = mean_sidereal_time(jd, jc)
v = apparent_sidereal_time(v0, delta_psi, epsilon)
alpha = geocentric_sun_right_ascension(lamd, epsilon, beta)
delta = geocentric_sun_declination(lamd, epsilon, beta)
if sst:
out[0, i] = v
out[1, i] = alpha
out[2, i] = delta
continue
m = sun_mean_longitude(jme)
eot = equation_of_time(m, alpha, delta_psi, epsilon)
H = local_hour_angle(v, lon, alpha)
xi = equatorial_horizontal_parallax(R)
u = uterm(lat)
x = xterm(u, lat, elev)
y = yterm(u, lat, elev)
delta_alpha = parallax_sun_right_ascension(x, xi, H, delta)
alpha_prime = topocentric_sun_right_ascension(alpha, delta_alpha)
delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha,
H)
H_prime = topocentric_local_hour_angle(H, delta_alpha)
e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime,
H_prime)
delta_e = atmospheric_refraction_correction(pressure, temp, e0,
atmos_refract)
e = topocentric_elevation_angle(e0, delta_e)
theta = topocentric_zenith_angle(e)
theta0 = topocentric_zenith_angle(e0)
gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat)
phi = topocentric_azimuth_angle(gamma)
out[0, i] = theta
out[1, i] = theta0
out[2, i] = e
out[3, i] = e0
out[4, i] = phi
out[5, i] = eot
def solar_position_numba(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads, sst=False):
"""Calculate the solar position using the numba compiled functions
and multiple threads. Very slow if functions are not numba compiled.
"""
loc_args = np.array([lat, lon, elev, pressure, temp, delta_t,
atmos_refract, sst])
ulength = unixtime.shape[0]
result = np.empty((6, ulength), dtype=np.float64)
if unixtime.dtype != np.float64:
unixtime = unixtime.astype(np.float64)
if ulength < numthreads:
pvl_logger.warning('The number of threads is more than the length of' +
' the time array. Only using %s threads.',
ulength)
numthreads = ulength
if numthreads <= 1:
pvl_logger.debug('Only using one thread for calculation')
solar_position_loop(unixtime, loc_args, result)
return result
split0 = np.array_split(unixtime, numthreads)
split2 = np.array_split(result, numthreads, axis=1)
chunks = [[a0, loc_args, split2[i]] for i, a0 in enumerate(split0)]
# Spawn one thread per chunk
threads = [threading.Thread(target=solar_position_loop, args=chunk)
for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result
def solar_position_numpy(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads, sst=False):
"""Calculate the solar position assuming unixtime is a numpy array. Note
this function will not work if the solar position functions were
compiled with numba.
"""
jd = julian_day(unixtime)
jde = julian_ephemeris_day(jd, delta_t)
jc = julian_century(jd)
jce = julian_ephemeris_century(jde)
jme = julian_ephemeris_millennium(jce)
L = heliocentric_longitude(jme)
B = heliocentric_latitude(jme)
R = heliocentric_radius_vector(jme)
Theta = geocentric_longitude(L)
beta = geocentric_latitude(B)
x0 = mean_elongation(jce)
x1 = mean_anomaly_sun(jce)
x2 = mean_anomaly_moon(jce)
x3 = moon_argument_latitude(jce)
x4 = moon_ascending_longitude(jce)
delta_psi = longitude_nutation(jce, x0, x1, x2, x3, x4)
delta_epsilon = obliquity_nutation(jce, x0, x1, x2, x3, x4)
epsilon0 = mean_ecliptic_obliquity(jme)
epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon)
delta_tau = aberration_correction(R)
lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau)
v0 = mean_sidereal_time(jd, jc)
v = apparent_sidereal_time(v0, delta_psi, epsilon)
alpha = geocentric_sun_right_ascension(lamd, epsilon, beta)
delta = geocentric_sun_declination(lamd, epsilon, beta)
if sst:
return v, alpha, delta
m = sun_mean_longitude(jme)
eot = equation_of_time(m, alpha, delta_psi, epsilon)
H = local_hour_angle(v, lon, alpha)
xi = equatorial_horizontal_parallax(R)
u = uterm(lat)
x = xterm(u, lat, elev)
y = yterm(u, lat, elev)
delta_alpha = parallax_sun_right_ascension(x, xi, H, delta)
alpha_prime = topocentric_sun_right_ascension(alpha, delta_alpha)
delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha, H)
H_prime = topocentric_local_hour_angle(H, delta_alpha)
e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime,
H_prime)
delta_e = atmospheric_refraction_correction(pressure, temp, e0,
atmos_refract)
e = topocentric_elevation_angle(e0, delta_e)
theta = topocentric_zenith_angle(e)
theta0 = topocentric_zenith_angle(e0)
gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat)
phi = topocentric_azimuth_angle(gamma)
return theta, theta0, e, e0, phi, eot
def solar_position(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads=8, sst=False):
"""
Calculate the solar position using the
NREL SPA algorithm described in [1].
If numba is installed, the functions can be compiled
and the code runs quickly. If not, the functions
still evaluate but use numpy instead.
Parameters
----------
unixtime : numpy array
Array of unix/epoch timestamps to calculate solar position for.
Unixtime is the number of seconds since Jan. 1, 1970 00:00:00 UTC.
A pandas.DatetimeIndex is easily converted using .astype(np.int64)/10**9
lat : float
Latitude to calculate solar position for
lon : float
Longitude to calculate solar position for
elev : float
Elevation of location in meters
    pressure : int or float
        avg. yearly pressure at location in millibars;
        used for atmospheric correction
    temp : int or float
        avg. yearly temperature at location in
        degrees C; used for atmospheric correction
    delta_t : float
        Difference between terrestrial time and UT1, in seconds
        (e.g. from USNO historical data and predictions)
    atmos_refract : float
        The approximate atmospheric refraction (in degrees)
        at sunrise and sunset.
    numthreads : int, optional
        Number of threads to use for computation if numba>=0.17
        is installed.
Returns
-------
Numpy Array with elements:
apparent zenith,
zenith,
elevation,
apparent_elevation,
azimuth,
equation_of_time
References
----------
[1] I. Reda and A. Andreas, Solar position algorithm for solar radiation
applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.
[2] I. Reda and A. Andreas, Corrigendum to Solar position algorithm for
solar radiation applications. Solar Energy, vol. 81, no. 6, p. 838, 2007.
"""
if USE_NUMBA:
do_calc = solar_position_numba
else:
do_calc = solar_position_numpy
result = do_calc(unixtime, lat, lon, elev, pressure,
temp, delta_t, atmos_refract, numthreads,
sst)
if not isinstance(result, np.ndarray):
try:
result = np.array(result)
except Exception:
pass
return result
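# Example call (illustrative coordinates and atmospheric values, not part of
# the original module); a pandas.DatetimeIndex is converted to unix time as
# described in the docstring above:
#     import pandas as pd
#     times = pd.date_range('2015-06-01', periods=24, freq='H', tz='UTC')
#     unixtime = np.array(times.astype(np.int64)) / 10**9
#     result = solar_position(unixtime, lat=32.2, lon=-110.9, elev=700.,
#                             pressure=1013., temp=25., delta_t=67.0,
#                             atmos_refract=0.5667)
#     # result is a (6, len(unixtime)) array; rows follow the Returns section above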
def transit_sunrise_sunset(dates, lat, lon, delta_t, numthreads):
"""
Calculate the sun transit, sunrise, and sunset
for a set of dates at a given location.
Parameters
----------
dates : array
Numpy array of ints/floats corresponding to the Unix time
for the dates of interest, must be midnight UTC (00:00+00:00)
on the day of interest.
lat : float
Latitude of location to perform calculation for
lon : float
Longitude of location
delta_t : float
Difference between terrestrial time and UT. USNO has tables.
numthreads : int
        Number of threads to use for calculation (if using numba)
Returns
-------
tuple : (transit, sunrise, sunset) localized to UTC
"""
if ((dates % 86400) != 0.0).any():
raise ValueError('Input dates must be at 00:00 UTC')
utday = (dates // 86400) * 86400
ttday0 = utday - delta_t
ttdayn1 = ttday0 - 86400
ttdayp1 = ttday0 + 86400
# index 0 is v, 1 is alpha, 2 is delta
utday_res = solar_position(utday, 0, 0, 0, 0, 0, delta_t,
0, numthreads, sst=True)
v = utday_res[0]
ttday0_res = solar_position(ttday0, 0, 0, 0, 0, 0, delta_t,
0, numthreads, sst=True)
ttdayn1_res = solar_position(ttdayn1, 0, 0, 0, 0, 0, delta_t,
0, numthreads, sst=True)
ttdayp1_res = solar_position(ttdayp1, 0, 0, 0, 0, 0, delta_t,
0, numthreads, sst=True)
m0 = (ttday0_res[1] - lon - v) / 360
cos_arg = ((np.sin(np.radians(-0.8333)) - np.sin(np.radians(lat))
* np.sin(np.radians(ttday0_res[2]))) /
(np.cos(np.radians(lat)) * np.cos(np.radians(ttday0_res[2]))))
cos_arg[abs(cos_arg) > 1] = np.nan
H0 = np.degrees(np.arccos(cos_arg)) % 180
m = np.empty((3, len(utday)))
m[0] = m0 % 1
m[1] = (m[0] - H0 / 360)
m[2] = (m[0] + H0 / 360)
# need to account for fractions of day that may be the next or previous
# day in UTC
add_a_day = m[2] >= 1
sub_a_day = m[1] < 0
m[1] = m[1] % 1
m[2] = m[2] % 1
vs = v + 360.985647 * m
n = m + delta_t / 86400
a = ttday0_res[1] - ttdayn1_res[1]
a[abs(a) > 2] = a[abs(a) > 2] % 1
ap = ttday0_res[2] - ttdayn1_res[2]
ap[abs(ap) > 2] = ap[abs(ap) > 2] % 1
b = ttdayp1_res[1] - ttday0_res[1]
b[abs(b) > 2] = b[abs(b) > 2] % 1
bp = ttdayp1_res[2] - ttday0_res[2]
bp[abs(bp) > 2] = bp[abs(bp) > 2] % 1
c = b - a
cp = bp - ap
alpha_prime = ttday0_res[1] + (n * (a + b + c * n)) / 2
delta_prime = ttday0_res[2] + (n * (ap + bp + cp * n)) / 2
Hp = (vs + lon - alpha_prime) % 360
Hp[Hp >= 180] = Hp[Hp >= 180] - 360
h = np.degrees(np.arcsin(np.sin(np.radians(lat)) *
np.sin(np.radians(delta_prime)) +
np.cos(np.radians(lat)) *
np.cos(np.radians(delta_prime))
* np.cos(np.radians(Hp))))
T = (m[0] - Hp[0] / 360) * 86400
R = (m[1] + (h[1] + 0.8333) / (360 * np.cos(np.radians(delta_prime[1])) *
np.cos(np.radians(lat)) *
np.sin(np.radians(Hp[1])))) * 86400
S = (m[2] + (h[2] + 0.8333) / (360 * np.cos(np.radians(delta_prime[2])) *
np.cos(np.radians(lat)) *
np.sin(np.radians(Hp[2])))) * 86400
S[add_a_day] += 86400
R[sub_a_day] -= 86400
transit = T + utday
sunrise = R + utday
sunset = S + utday
return transit, sunrise, sunset
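# Example call (illustrative location, not part of the original module); the
# input times must be midnight UTC, i.e. exact multiples of 86400 seconds:
#     dates = np.array([1420070400.0])  # 2015-01-01 00:00:00 UTC
#     transit, sunrise, sunset = transit_sunrise_sunset(
#         dates, lat=32.2, lon=-110.9, delta_t=67.0, numthreads=1)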
|
bsd-3-clause
|
hsuantien/scikit-learn
|
examples/svm/plot_iris.py
|
62
|
3251
|
"""
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
   more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
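# A hedged aside (not part of the original example): one source of the gap
# noted in the docstring is the loss function. Depending on the scikit-learn
# version, LinearSVC(C=C, loss='hinge') uses the plain hinge loss and usually
# lands closer to SVC(kernel='linear'), although the One-vs-Rest vs One-vs-One
# multiclass handling still differs.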
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max] x [y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
|
bsd-3-clause
|
Ledoux/ShareYourSystem
|
Pythonlogy/ShareYourSystem/Specials/Oldpredicters/Predispiker/draft/__init__ copy.py
|
1
|
16400
|
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Predispiker
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Specials.Predicters.Predicter"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
Predirater=BaseModule
import numpy as np
import scipy
from matplotlib import pyplot
#</ImportSpecificModules>
#<DefineLocals>
#</DefineLocals>
#<DefineClass>
@DecorationClass(**{
})
class PredispikerClass(BaseClass):
def default_init(self,
_PredispikingNeuronUnitsInt=0,
_PredispikingSensorUnitsInt=0,
_PredispikingPerturbativeWeightFloat=0.1,
_PredispikingRestVoltageFloat=-60.,
_PredispikingThresholdVoltageFloatsArray=None,
_PredispikingResetVoltageFloatsArray=None,
_PredispikingConstantTimeFloat=10.,
_PredispikingDelayTimeFloat=0.,
_PredispikingRunTimeFloat=100.,
_PredispikingStepTimeFloat=0.1,
_PredispikedTimeFloatsArray=None,
_PredispikingInputRandomStatStr='norm',
_PredispikingLateralRandomStatStr='norm',
_PredispikedCommandFloatsArray=None,
_PredispikedJacobianFloatsArray=None,
_PredispikedInputRandomFloatsArray=None,
_PredispikedPerturbativeInputWeigthFloatsArray=None,
_PredispikedNullFloatsArray=None,
_PredispikedExactLateralWeigthFloatsArray=None,
_PredispikedLateralRandomFloatsArray=None,
_PredispikedInitialSensorFloatsArray=None,
_PredispikedInitialNeuronFloatsArray=None,
_PredispikedSensorFloatsArray=None,
_PredispikedPerturbativeNeuronFloatsArray=None,
_PredispikedExactNeuronFloatsArray=None,
_PredispikedLeakNeuronFloatsArray=None,
_PredispikedPerturbativeDecoderFloatsArray=None,
_PredispikedExactDecoderFloatsArray=None,
_PredispikedLeakDecoderFloatsArray=None,
**_KwargVariablesDict
):
""" """
#Call the parent init method
BaseClass.__init__(self,**_KwargVariablesDict)
def do_predispike(self):
#/#################/#
# External care : Prepare time and the command
#
#arange
self.PredispikedTimeFloatsArray=np.arange(
0.,
self.PredispikingRunTimeFloat,
self.PredispikingStepTimeFloat
)
#array
self.PredispikedCommandFloatsArray=np.array(
map(
lambda __IndexInt:
Predirater.getKrenelFloatsArray(
[0.,1.],
[self.PredispikingRunTimeFloat/4.,self.PredispikingRunTimeFloat/2.]
),
xrange(self.PredispikingSensorUnitsInt)
)
)
#debug
'''
self.debug(
[
'We have prepared the time and the commands',
('self.',self,['PredispikedCommandFloatsArray'])
]
)
'''
#/#################/#
# Sensor care : Prepare the input weigth and the null matrix
#
self.PredispikedJacobianFloatsArray=-(1./self.PredispikingConstantTimeFloat)*np.diag(
np.ones(
self.PredispikingSensorUnitsInt
)
)
#debug
'''
self.debug(
[
'We have prepared the sensor jacobian',
('self.',self,['PredispikedJacobianFloatsArray'])
]
)
'''
#/#################/#
# Neuron care : Prepare the input weigth, null matrix, exact and perturbativ matrix
#
#random
self.PredispikedExactDecoderWeigthFloatsArray=scipy.stats.uniform.rvs(
size=(
self.PredispikingSensorUnitsInt,
self.PredispikingNeuronUnitsInt
)
)
#find the null space
self.PredispikedNullFloatsArray=Predirater.getNullFloatsArray(
self.PredispikedExactDecoderWeigthFloatsArray
)
#debug
'''
PredispikedProductArray=np.dot(
self.PredispikedExactDecoderWeigthFloatsArray,
self.PredispikedNullFloatsArray
)
self.debug(
[
('self.',self,[
'PredispikedExactDecoderWeigthFloatsArray',
'PredispikingNeuronUnitsInt'
]
),
("locals()['",locals(),['PredispikedProductArray'],"']")
]
)
'''
#random
self.PredispikedInputRandomFloatsArray=self.PredispikingPerturbativeWeightFloat*getattr(
scipy.stats,
self.PredispikingInputRandomStatStr
).rvs(
size=(
np.shape(self.PredispikedNullFloatsArray)[1],
self.PredispikingSensorUnitsInt
)
)
#dot
self.PredispikedPerturbativeInputWeigthFloatsArray=np.dot(
self.PredispikedNullFloatsArray,
self.PredispikedInputRandomFloatsArray
)
#dot
self.PredispikedExactLateralWeigthFloatsArray=np.dot(
self.PredispikedExactDecoderWeigthFloatsArray.T,
self.PredispikedExactDecoderWeigthFloatsArray
)
#random
self.PredispikedLateralRandomFloatsArray=self.PredispikingPerturbativeWeightFloat*getattr(
scipy.stats,
self.PredispikingLateralRandomStatStr
).rvs(
size=(
np.shape(self.PredispikedNullFloatsArray)[1],
self.PredispikingNeuronUnitsInt
)
)
#dot
self.PredispikedPerturbativeLateralWeigthFloatsArray=np.dot(
self.PredispikedNullFloatsArray,
self.PredispikedLateralRandomFloatsArray
)
#pinv
self.PredispikedLeakDecoderWeigthFloatsArray=np.linalg.pinv(
self.PredispikedExactDecoderWeigthFloatsArray.T
)
#debug
'''
PredispikedPinvFloatsArray=np.dot(
self.PredispikedLeakDecoderWeigthFloatsArray,
self.PredispikedExactDecoderWeigthFloatsArray.T
)
self.debug(
[
'PredispikedPinvFloatsArray is ',
str(PredispikedPinvFloatsArray)
]
)
'''
#/#################/#
# Prepare the initial conditions
#
#random sensors
PredispikedInitialNeuronFloatsArray=scipy.stats.uniform.rvs(
size=self.PredispikingNeuronUnitsInt
)
#random rates
PredispikedInitialSensorFloatsArray=scipy.stats.uniform.rvs(
size=self.PredispikingSensorUnitsInt
)
#/#################/#
# Shape the size of all the runs
#
#init sensors
self.PredispikedSensorFloatsArray=np.zeros(
(self.PredispikingSensorUnitsInt,len(self.PredispikedTimeFloatsArray))
)
self.PredispikedSensorFloatsArray[:,0]=PredispikedInitialSensorFloatsArray
#init perturbative rates
self.PredispikedPerturbativeNeuronFloatsArray=np.zeros(
(self.PredispikingNeuronUnitsInt,len(self.PredispikedTimeFloatsArray))
)
self.PredispikedPerturbativeNeuronFloatsArray[:,0]=PredispikedInitialNeuronFloatsArray
#init exact rates
self.PredispikedExactNeuronFloatsArray=np.zeros(
(self.PredispikingNeuronUnitsInt,len(self.PredispikedTimeFloatsArray))
)
self.PredispikedExactNeuronFloatsArray[:,0]=PredispikedInitialNeuronFloatsArray
#init leak control rates
self.PredispikedLeakNeuronFloatsArray=np.zeros(
(self.PredispikingNeuronUnitsInt,len(self.PredispikedTimeFloatsArray))
)
self.PredispikedLeakNeuronFloatsArray[:,0]=PredispikedInitialNeuronFloatsArray
#init perturbative decoder
self.PredispikedPerturbativeDecoderFloatsArray=np.zeros(
(self.PredispikingSensorUnitsInt,len(self.PredispikedTimeFloatsArray))
)
self.PredispikedPerturbativeDecoderFloatsArray[:,0]=np.dot(
self.PredispikedExactDecoderWeigthFloatsArray,
PredispikedInitialNeuronFloatsArray
)
#init exact decoder
self.PredispikedExactDecoderFloatsArray=np.zeros(
(self.PredispikingSensorUnitsInt,len(self.PredispikedTimeFloatsArray))
)
self.PredispikedExactDecoderFloatsArray[:,0]=np.dot(
self.PredispikedExactDecoderWeigthFloatsArray,
PredispikedInitialNeuronFloatsArray
)
#init leak control decoder
self.PredispikedLeakDecoderFloatsArray=np.zeros(
(self.PredispikingSensorUnitsInt,len(self.PredispikedTimeFloatsArray))
)
self.PredispikedLeakDecoderFloatsArray[:,0]=np.dot(
self.PredispikedLeakDecoderWeigthFloatsArray,
PredispikedInitialNeuronFloatsArray
)
#/#################/#
# integrativ Loop
#
#for loop
for __IndexInt in xrange(1,len(self.PredispikedTimeFloatsArray)):
#/#################/#
# Sensor part
#
#debug
'''
self.debug(
[
'shape(self.PredispikedCommandFloatsArray) is '+str(
np.shape(self.PredispikedCommandFloatsArray)
),
'shape(self.PredispikedSensorFloatsArray) is '+str(
np.shape(self.PredispikedSensorFloatsArray)
),
('self.',self,[
'PredispikedJacobianFloatsArray'
])
]
)
'''
#Current
PredispikedSensorCurrentFloatsArray=np.dot(
self.PredispikedJacobianFloatsArray,
self.PredispikedSensorFloatsArray[:,__IndexInt-1]
)+self.PredispikedCommandFloatsArray[:,__IndexInt]
#/#################/#
# Perturbative Neuron part
#
#Input Current
PredispikedPerturbativeNeuronCurrentFloatsArray=np.dot(
self.PredispikedExactDecoderWeigthFloatsArray.T+self.PredispikedPerturbativeInputWeigthFloatsArray,
self.PredispikedCommandFloatsArray[:,__IndexInt-1]
)
#Lateral Current
PredispikedPerturbativeNeuronCurrentFloatsArray-=np.dot(
self.PredispikedExactLateralWeigthFloatsArray+self.PredispikedPerturbativeLateralWeigthFloatsArray,
self.PredispikedPerturbativeNeuronFloatsArray[:,__IndexInt-1]
)
#/#################/#
# Exact Neuron part
#
#Input Current
PredispikedExactNeuronCurrentFloatsArray=np.dot(
self.PredispikedExactDecoderWeigthFloatsArray.T,
self.PredispikedCommandFloatsArray[:,__IndexInt-1]
)
#Lateral Current
PredispikedExactNeuronCurrentFloatsArray-=np.dot(
self.PredispikedExactLateralWeigthFloatsArray,
self.PredispikedExactNeuronFloatsArray[:,__IndexInt-1]
)
#/#################/#
# Leak Control Neuron part
#
#Input Current
PredispikedLeakNeuronCurrentFloatsArray=np.dot(
self.PredispikedExactDecoderWeigthFloatsArray.T+self.PredispikedPerturbativeInputWeigthFloatsArray,
self.PredispikedCommandFloatsArray[:,__IndexInt-1]
)
#Lateral Current
PredispikedLeakNeuronCurrentFloatsArray-=np.dot(
np.diag(np.ones(self.PredispikingNeuronUnitsInt)),
self.PredispikedLeakNeuronFloatsArray[:,__IndexInt-1]
)
#/#################/#
# Euler part
#
#sensor
self.PredispikedSensorFloatsArray[
:,
__IndexInt
]=self.PredispikedSensorFloatsArray[
:,
__IndexInt-1
        ]+PredispikedSensorCurrentFloatsArray*self.PredispikingStepTimeFloat
#set
LocalDict=locals()
#rate
for __TagStr in ['Perturbative','Exact','Leak']:
#set
getattr(
self,
'Predispiked'+__TagStr+'NeuronFloatsArray'
)[:,__IndexInt]=getattr(
self,
'Predispiked'+__TagStr+'NeuronFloatsArray'
)[:,__IndexInt-1]+LocalDict[
'Predispiked'+__TagStr+'NeuronCurrentFloatsArray'
]*self.PredispikingStepTimeFloat
#/#################/#
# Decoder part
#
#dot
self.PredispikedPerturbativeDecoderFloatsArray[
:,
__IndexInt
]=np.dot(
self.PredispikedExactDecoderWeigthFloatsArray,
self.PredispikedPerturbativeNeuronFloatsArray[:,__IndexInt-1]
)
#exact control
self.PredispikedExactDecoderFloatsArray[
:,
__IndexInt
]=np.dot(
self.PredispikedExactDecoderWeigthFloatsArray,
self.PredispikedExactNeuronFloatsArray[:,__IndexInt-1]
)
#leak control
self.PredispikedLeakDecoderFloatsArray[
:,
__IndexInt
]=np.dot(
self.PredispikedLeakDecoderWeigthFloatsArray,
self.PredispikedLeakNeuronFloatsArray[:,__IndexInt-1]
)
#/#################/#
# Plot
#
#debug
self.debug(
[
'len(self.PredispikedTimeFloatsArray) is '+str(len(self.PredispikedTimeFloatsArray)),
'np.shape(self.PredispikedCommandFloatsArray) is '+str(np.shape(self.PredispikedCommandFloatsArray))
]
)
#init
pyplot.figure()
#/#################/#
# Command and sensors
#
#subplot
PredispikedSensorAxis=pyplot.subplot(3,1,1)
#command
map(
lambda __IndexInt:
PredispikedSensorAxis.plot(
self.PredispikedTimeFloatsArray,
self.PredispikedCommandFloatsArray[__IndexInt]
)
if __IndexInt<len(self.PredispikedCommandFloatsArray)
else None,
[0]
)
#sensor
map(
lambda __IndexInt:
PredispikedSensorAxis.plot(
self.PredispikedTimeFloatsArray,
self.PredispikedSensorFloatsArray[__IndexInt,:],
color='g',
linewidth=3
)
if __IndexInt<len(self.PredispikedSensorFloatsArray)
else None,
[0,1]
)
#set
PredispikedSensorAxis.set_xlim([0.,self.PredispikingRunTimeFloat])
PredispikedSensorAxis.set_ylim([-0.1,3.])
#/#################/#
# rates
#
#subplot
PredispikedNeuronAxis=pyplot.subplot(3,1,2)
#perturbative
map(
lambda __IndexInt:
PredispikedNeuronAxis.plot(
self.PredispikedTimeFloatsArray,
self.PredispikedPerturbativeNeuronFloatsArray[__IndexInt,:],
color='blue',
linewidth=3
)
if __IndexInt<len(self.PredispikedPerturbativeNeuronFloatsArray)
else None,
[0,1]
)
#exact
map(
lambda __IndexInt:
PredispikedNeuronAxis.plot(
self.PredispikedTimeFloatsArray,
self.PredispikedExactNeuronFloatsArray[__IndexInt,:],
color='violet',
linewidth=2
)
if __IndexInt<len(self.PredispikedPerturbativeNeuronFloatsArray)
else None,
[0,1]
)
#leak
map(
lambda __IndexInt:
PredispikedNeuronAxis.plot(
self.PredispikedTimeFloatsArray,
self.PredispikedLeakNeuronFloatsArray[__IndexInt,:],
color='brown',
linewidth=1
)
if __IndexInt<len(self.PredispikedLeakNeuronFloatsArray)
else None,
[0,1]
)
#set
PredispikedNeuronAxis.set_xlim([0.,self.PredispikingRunTimeFloat])
#PredispikedNeuronAxis.set_ylim([-1.,1.])
PredispikedNeuronAxis.set_ylim(
[
self.PredispikedPerturbativeNeuronFloatsArray.min(),
self.PredispikedPerturbativeNeuronFloatsArray.max()
]
)
#/#################/#
# decoders
#
#subplot
PredispikedDecoderAxis=pyplot.subplot(3,1,3)
#sensor
map(
lambda __IndexInt:
PredispikedDecoderAxis.plot(
self.PredispikedTimeFloatsArray,
self.PredispikedSensorFloatsArray[__IndexInt],
color='g',
linewidth=3
)
if __IndexInt<len(self.PredispikedSensorFloatsArray)
else None,
[0,1]
)
#perturbative
map(
lambda __IndexInt:
PredispikedDecoderAxis.plot(
self.PredispikedTimeFloatsArray,
self.PredispikedPerturbativeDecoderFloatsArray[__IndexInt,:],
color='blue',
linewidth=3
)
if __IndexInt<len(self.PredispikedPerturbativeDecoderFloatsArray)
else None,
[0,1]
)
#exact
map(
lambda __IndexInt:
PredispikedDecoderAxis.plot(
self.PredispikedTimeFloatsArray,
self.PredispikedExactDecoderFloatsArray[__IndexInt,:],
color='violet',
linewidth=2
)
if __IndexInt<len(self.PredispikedPerturbativeDecoderFloatsArray)
else None,
[0,1]
)
#leak
map(
lambda __IndexInt:
PredispikedDecoderAxis.plot(
self.PredispikedTimeFloatsArray,
self.PredispikedLeakDecoderFloatsArray[__IndexInt,:],
color='brown',
linewidth=1
)
if __IndexInt<len(self.PredispikedLeakDecoderFloatsArray)
else None,
[0,1]
)
#set
PredispikedDecoderAxis.set_xlim([0.,self.PredispikingRunTimeFloat])
PredispikedDecoderAxis.set_ylim([-0.1,3.])
#show
pyplot.show()
#</DefineClass>
#</DefinePrint>
PredispikerClass.PrintingClassSkipKeyStrsList.extend(
[
'PredispikingNeuronUnitsInt',
'PredispikingSensorUnitsInt',
'PredispikingPerturbativeWeightFloat',
'PredispikingRunTimeFloat',
'PredispikingStepTimeFloat',
'PredispikedTimeFloatsArray',
'PredispikedCommandFloatsArray',
'PredispikedJacobianFloatsArray',
'PredispikedExactDecoderWeigthFloatsArray',
'PredispikedInputRandomFloatsArray',
'PredispikedPerturbativeInputWeigthFloatsArray',
'PredispikedExactDecoderWeigthFloatsArray',
'PredispikedExactLateralWeigthFloatsArray',
'PredispikedLateralRandomFloatsArray',
'PredispikedExactLateralWeigthFloatsArray',
'PredispikedPerturbativeLateralWeigthFloatsArray',
'PredispikedNullFloatsArray',
'PredispikedInitialSensorFloatsArray',
'PredispikedInitialNeuronFloatsArray',
'PredispikedSensorFloatsArray',
'PredispikedPerturbativeNeuronFloatsArray',
'PredispikedExactNeuronFloatsArray',
'PredispikedLeakNeuronFloatsArray',
'PredispikedPerturbativeDecoderFloatsArray',
'PredispikedExactDecoderFloatsArray',
'PredispikedLeakDecoderWeigthFloatsArray',
'PredispikedLeakDecoderFloatsArray'
]
)
#<DefinePrint>
|
mit
|
RPGOne/Skynet
|
imbalanced-learn-master/examples/over-sampling/plot_smote_bordeline_2.py
|
3
|
1892
|
"""
==================
SMOTE borderline 2
==================
An illustration of the SMOTE borderline 2 over-sampling method.
"""
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Define some color for the plotting
almost_black = '#262626'
palette = sns.color_palette()
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTE
# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
n_informative=3, n_redundant=1, flip_y=0,
n_features=20, n_clusters_per_class=1,
n_samples=5000, random_state=10)
# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform x to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)
# Apply Borderline SMOTE 2
sm = SMOTE(kind='borderline2')
X_resampled, y_resampled = sm.fit_sample(X, y)
X_res_vis = pca.transform(X_resampled)
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
ax1.set_title('Original set')
ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1],
label="Class #0", alpha=.5, edgecolor=almost_black,
facecolor=palette[0], linewidth=0.15)
ax2.scatter(X_res_vis[y_resampled == 1, 0], X_res_vis[y_resampled == 1, 1],
label="Class #1", alpha=.5, edgecolor=almost_black,
facecolor=palette[2], linewidth=0.15)
ax2.set_title('SMOTE borderline 2')
plt.show()
|
bsd-3-clause
|
elvandy/nltools
|
examples/01_DataOperations/plot_adjacency.py
|
2
|
6654
|
"""
Adjacency Class
===============
Nltools has an additional data structure class for working with two-dimensional
square matrices. This can be helpful when working with similarity/distance
matrices or directed or undirected graphs. Similar to the Brain_Data class,
matrices are vectorized and can store multiple matrices in the same object.
This might reflect different brain regions, subjects, or time. Most of the
methods on the Adjacency class are consistent with those in the Brain_Data
class.
"""
#########################################################################
# Load Data
# ----------
#
# Similar to the Brain_Data class, Adjacency instances can be initialized by passing in a numpy array or pandas data frame, or a path to a csv file or list of files. Here we will generate some fake data to demonstrate how to use this class. In addition to data, you must indicate the type of matrix. Currently, you can specify `['similarity','distance','directed']`. Similarity matrices are symmetrical, typically with ones along the diagonal; Distance matrices are symmetrical with zeros along the diagonal; and Directed graph matrices are not symmetrical. Symmetrical matrices only store the upper triangle. The Adjacency class can also accommodate labels, but does not require them.
from nltools.data import Adjacency
from scipy.linalg import block_diag
import numpy as np
m1 = block_diag(np.ones((4,4)),np.zeros((4,4)),np.zeros((4,4)))
m2 = block_diag(np.zeros((4,4)),np.ones((4,4)),np.zeros((4,4)))
m3 = block_diag(np.zeros((4,4)),np.zeros((4,4)),np.ones((4,4)))*3
noisy = (m1*1+m2*2+m3*3) + np.random.randn(12,12)*.1
dat = Adjacency(noisy, matrix_type='similarity',labels=['C1']*4 + ['C2']*4 + ['C3']*4)
#########################################################################
# Basic information about the object can be viewed by simply calling it.
print(dat)
#########################################################################
# Adjacency objects can easily be converted back into two-dimensional matrices with the `.squareform()` method.
dat.squareform()
#########################################################################
# Matrices can viewed as a heatmap using the `.plot()` method.
f = dat.plot()
#########################################################################
# The mean within a grouping label can be calculated using the `.within_cluster_mean()` method. You must specify a group variable to group the data. Here we use the labels.
print(dat.within_cluster_mean(clusters=dat.labels))
#########################################################################
# Regression
# ----------
#
# Adjacency objects can currently accommodate two different types of regression. Sometimes we might want to decompose an Adjacency matrix from a linear combination of other Adjacency matrices. Other times we might want to perform a regression at each pixel in a stack of Adjacency matrices. Here we provide an example of each method. We use the same data we generated above, but attempt to decompose it by each block of data. We create the design matrix by simply concatenating the matrices we used to create the data object. The regress method returns a dictionary containing all of the relevant information from the regression. Here we show that the model recovers the average weight in each block.
X = Adjacency([m1,m2,m3],matrix_type='similarity')
stats = dat.regress(X)
print(stats['beta'])
#########################################################################
# In addition to decomposing a single adjacency matrix, we can also estimate a model that predicts the variance over each voxel. This is equivalent to a univariate regression in imaging analyses. Remember that just like in imaging these tests are non-independent and may require correcting for multiple comparisons. Here we create some data that varies over matrices and identify pixels that follow a particular on-off-on pattern. We plot the t-values that exceed 2.
from nltools.data import Design_Matrix
import matplotlib.pyplot as plt
data = Adjacency([m1 + np.random.randn(12,12)*.5 for x in range(5)] +
[np.zeros((12,12)) + np.random.randn(12,12)*.5 for x in range(5)] +
[m1 + np.random.randn(12,12)*.5 for x in range(5)])
X = Design_Matrix([1]*5 + [0]*5 + [1]*5)
f = X.plot()
f.set_title('Model',fontsize=18)
stats = data.regress(X)
t = stats['t'].plot(vmin=2)
plt.title('Significant Pixels',fontsize=18)
#########################################################################
# Similarity/Distance
# -------------------
#
# We can calculate similarity between two Adjacency matrices using `.similarity()`.
stats = dat.similarity(m1)
print(stats)
#########################################################################
# We can also calculate the distance between multiple matrices contained within a single Adjacency object. Any distance metric available in `scikit-learn <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html>`_ can be used by specifying the `method` flag. This outputs an Adjacency matrix. In the example below we see that several matrices are more similar to each other (i.e., when the signal is on). Remember that the nodes here now represent each matrix from the original distance matrix.
dist = data.distance(method='correlation')
f = dist.plot()
#########################################################################
# Similarity matrices can be converted to and from Distance matrices using `.similarity_to_distance()` and `.distance_to_similarity()`.
f = dist.distance_to_similarity().plot()
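#########################################################################
# As a short sketch of the reverse conversion (reusing the `dat` similarity
# object created above), we can convert the similarity matrix to a distance
# matrix and plot it.
f = dat.similarity_to_distance().plot()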
#########################################################################
# Multidimensional Scaling
# ------------------------
#
# We can perform additional analyses on distance matrices such as multidimensional scaling. Here we provide an example to create a 3D multidimensional scaling plot of our data to see if the on and off matrices might naturally group together.
dat = Adjacency(noisy, matrix_type='similarity',labels=['C1']*4 + ['C2']*4 + ['C3']*4)
dist.labels = ['On']*5 + ['Off']*5 + ['On']*5
f = dist.plot_mds(n_components=3)
#########################################################################
# Graphs
# ------
#
# Adjacency matrices can be cast to networkx objects using `.to_graph()` if the optional dependency is installed. This allows any graph theoretic metrics or plots to be easily calculated from Adjacency objects.
import networkx as nx
dat = Adjacency(m1+m2+m3, matrix_type='similarity')
g = dat.to_graph()
print('Degree of each node: %s' % g.degree())
nx.draw_circular(g)
|
mit
|
imaculate/scikit-learn
|
examples/cluster/plot_birch_vs_minibatchkmeans.py
|
333
|
3694
|
"""
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
|
bsd-3-clause
|
petroniocandido/pyFTS
|
pyFTS/tests/hyperparam.py
|
1
|
5416
|
import numpy as np
import pandas as pd
from pyFTS.hyperparam import GridSearch, Evolutionary, mvfts as deho_mv
from pyFTS.models import pwfts
from pyFTS.models.multivariate import mvfts, wmvfts
from pyFTS.models.seasonal.common import DateTime
def get_dataset():
#from pyFTS.data import SONDA
from pyFTS.data import Malaysia
#data = [k for k in SONDA.get_data('ws_10m') if k > 0.1 and k != np.nan and k is not None]
#data = [np.nanmean(data[k:k+60]) for k in np.arange(0,len(data),60)]
#data = pd.read_csv('https://query.data.world/s/6xfb5useuotbbgpsnm5b2l3wzhvw2i', sep=';')
data = Malaysia.get_dataframe()
data['time'] = pd.to_datetime(data["time"], format='%m/%d/%y %I:%M %p')
#return 'SONDA.ws_10m', data
return 'Malaysia', data.iloc[:2000] #train, test
#return 'Malaysia.temperature', data # train, test
'''
hyperparams = {
'order':[3],
'partitions': np.arange(10,100,3),
'partitioner': [1],
'mf': [1], #, 2, 3, 4],
'lags': np.arange(2, 7, 1),
'alpha': np.arange(.0, .5, .05)
}
hyperparams = {
'order':[3], #[1, 2],
'partitions': np.arange(10,100,10),
'partitioner': [1,2],
'mf': [1] ,#, 2, 3, 4],
'lags': np.arange(1, 10),
'alpha': [.0, .3, .5]
}
'''
nodes = ['192.168.0.106', '192.168.0.110', '192.168.0.107']
datsetname, dataset = get_dataset()
#GridSearch.execute(hyperparams, datsetname, dataset, nodes=nodes,
# window_size=10000, train_rate=.9, increment_rate=1,)
explanatory_variables =[
{'name': 'Temperature', 'data_label': 'temperature', 'type': 'common'},
{'name': 'Daily', 'data_label': 'time', 'type': 'seasonal', 'seasonality': DateTime.minute_of_day, 'npart': 24 },
{'name': 'Weekly', 'data_label': 'time', 'type': 'seasonal', 'seasonality': DateTime.day_of_week, 'npart': 7 },
{'name': 'Monthly', 'data_label': 'time', 'type': 'seasonal', 'seasonality': DateTime.day_of_month, 'npart': 4 },
{'name': 'Yearly', 'data_label': 'time', 'type': 'seasonal', 'seasonality': DateTime.day_of_year, 'npart': 12 }
]
target_variable = {'name': 'Load', 'data_label': 'load', 'type': 'common'}
nodes=['192.168.28.38']
deho_mv.random_search(datsetname, dataset,
ngen=200, mgen=70,
window_size=2000, train_rate=.9, increment_rate=1,
experiments=1,
fts_method=wmvfts.WeightedMVFTS,
variables=explanatory_variables,
target_variable=target_variable,
#distributed='dispy', nodes=nodes,
parameters=dict(num_batches=5)
#parameters=dict(distributed='dispy', nodes=nodes, num_batches=5)
)
'''
deho_mv.execute(datsetname, dataset,
ngen=20, npop=15,psel=0.6, pcross=.5, pmut=.3,
window_size=2000, train_rate=.9, increment_rate=1,
experiments=1,
fts_method=wmvfts.WeightedMVFTS,
variables=explanatory_variables,
target_variable=target_variable,
#distributed='dispy', nodes=nodes,
parameters=dict(num_batches=5)
#parameters=dict(distributed='dispy', nodes=nodes, num_batches=5)
)
'''
'''
ret = Evolutionary.execute(datsetname, dataset,
ngen=30, npop=20,psel=0.6, pcross=.5, pmut=.3,
window_size=10000, train_rate=.9, increment_rate=.3,
experiments=1,
fts_method=pwfts.ProbabilisticWeightedFTS,
database_file='experiments.db',
distributed='dispy', nodes=nodes)
'''
#res = GridSearch.cluster_method({'mf':1, 'partitioner': 1, 'npart': 10, 'lags':[1], 'alpha': 0.0, 'order': 1},
# dataset, window_size = 10000, train_rate = .9, increment_rate = 1)
#print(res)
#Evolutionary.cluster_method(dataset, 70, 20, .8, .3, 1)
"""
from pyFTS.models import hofts
from pyFTS.partitioners import Grid
from pyFTS.benchmarks import Measures
fs = Grid.GridPartitioner(data=dataset[:800], npart=30)
model = hofts.WeightedHighOrderFTS(partitioner=fs, order=2)
model.fit(dataset[:800])
model.predict(dataset[800:1000])
Measures.get_point_statistics(dataset[800:1000], model)
print(model)
ret = Evolutionary.execute(datsetname, dataset,
ngen=30, npop=20, pcruz=.5, pmut=.3,
window_size=800, experiments=30)
parameters={'distributed': 'spark', 'url': 'spark://192.168.0.106:7077'})
print(ret)
from pyFTS.hyperparam import Evolutionary
from pyFTS.data import SONDA
data = np.array(SONDA.get_data('glo_avg'))
data = data[~(np.isnan(data) | np.equal(data, 0.0))]
dataset = data[:1000000]
del(data)
import pandas as pd
df = pd.read_csv('https://query.data.world/s/i7eb73c4rluf2luasppsyxaurx5ol7', sep=';')
dataset = df['glo_avg'].values
from pyFTS.models import hofts
from pyFTS.partitioners import Grid
from pyFTS.benchmarks import Measures
from time import time
t1 = time()
Evolutionary.execute('SONDA', dataset,
ngen=20, mgen=5, npop=15, pcruz=.5, pmut=.3,
window_size=35000, train_rate=.6, increment_rate=1,
collect_statistics=True, experiments=5)
#distributed='dispy', nodes=['192.168.0.110','192.168.0.106','192.168.0.107'])
t2 = time()
print(t2 - t1)
"""
|
gpl-3.0
|
mozman/ezdxf
|
src/ezdxf/addons/text2path.py
|
1
|
12266
|
# Copyright (c) 2021, Manfred Moitzi
# License: MIT License
from typing import Union, List, Dict, Tuple
import enum
from matplotlib.textpath import TextPath
from matplotlib.font_manager import FontProperties, findfont
from ezdxf.entities import Text, Attrib, Hatch
from ezdxf.lldxf import const
from ezdxf.math import Matrix44, BoundingBox
from ezdxf import path
from ezdxf.path import Path
from ezdxf.tools import fonts
from ezdxf.query import EntityQuery
__all__ = [
"make_path_from_str",
"make_paths_from_str",
"make_hatches_from_str",
"make_path_from_entity",
"make_paths_from_entity",
"make_hatches_from_entity",
"virtual_entities",
"explode",
"Kind",
]
AnyText = Union[Text, Attrib]
VALID_TYPES = ("TEXT", "ATTRIB")
def make_path_from_str(
s: str,
font: fonts.FontFace,
size: float = 1.0,
align: str = "LEFT",
length: float = 0,
m: Matrix44 = None,
) -> Path:
"""Convert a single line string `s` into a :term:`Multi-Path` object.
The text `size` is the height of the uppercase letter "X" (cap height).
The paths are aligned about the insertion point at (0, 0).
BASELINE means the bottom of the letter "X".
Args:
s: text to convert
font: font face definition as :class:`~ezdxf.tools.fonts.FontFace` object
size: text size (cap height) in drawing units
align: alignment as string, default is "LEFT"
length: target length for the "ALIGNED" and "FIT" alignments
m: transformation :class:`~ezdxf.math.Matrix44`
.. versionadded:: 0.17
"""
if len(s) == 0:
return Path()
font_properties, font_measurements = _get_font_data(font)
# scale font rendering units to drawing units:
render_size = size / font_measurements.cap_height
p = _str_to_path(s, font_properties, render_size)
bbox = path.bbox([p], flatten=0)
# Text is rendered in drawing units,
# therefore do alignment in drawing units:
draw_units_fm = font_measurements.scale_from_baseline(size)
matrix = alignment_transformation(draw_units_fm, bbox, align, length)
if m is not None:
matrix *= m
return p.transform(matrix)
def make_paths_from_str(
s: str,
font: fonts.FontFace,
size: float = 1.0,
align: str = "LEFT",
length: float = 0,
m: Matrix44 = None,
) -> List[Path]:
"""Convert a single line string `s` into a list of
:class:`~ezdxf.path.Path` objects. All paths are returned as a list of
:term:`Single-Path` objects.
The text `size` is the height of the uppercase letter "X" (cap height).
The paths are aligned about the insertion point at (0, 0).
BASELINE means the bottom of the letter "X".
Args:
s: text to convert
font: font face definition as :class:`~ezdxf.tools.fonts.FontFace` object
size: text size (cap height) in drawing units
align: alignment as string, default is "LEFT"
length: target length for the "ALIGNED" and "FIT" alignments
m: transformation :class:`~ezdxf.math.Matrix44`
"""
if len(s) == 0:
return []
p = make_path_from_str(s, font, size, align, length, m)
return list(p.sub_paths())
def _get_font_data(
font: fonts.FontFace,
) -> Tuple[FontProperties, fonts.FontMeasurements]:
fp = FontProperties(
family=font.family,
style=font.style,
stretch=font.stretch,
weight=font.weight,
)
ttf_path = findfont(fp)
fonts.load() # not expensive if already loaded
# The ttf file path is the cache key for font measurements:
fm = fonts.get_font_measurements(ttf_path)
return fp, fm
def _str_to_path(s: str, fp: FontProperties, size: float = 1.0) -> Path:
text_path = TextPath((0, 0), s, size=size, prop=fp, usetex=False)
return path.multi_path_from_matplotlib_path(text_path)
def alignment_transformation(
fm: fonts.FontMeasurements, bbox: BoundingBox, align: str, length: float
) -> Matrix44:
"""Returns the alignment transformation matrix to transform a basic
text path at location (0, 0) and alignment "LEFT" into the final text
path of the given alignment.
    For the alignments FIT and ALIGNED, the argument `length` defines the
    total length of the final text path. The given bounding box defines the
    rendering borders of the basic text path.
"""
halign, valign = const.TEXT_ALIGN_FLAGS[align.upper()]
matrix = basic_alignment_transformation(fm, bbox, halign, valign)
stretch_x = 1.0
stretch_y = 1.0
if align == "ALIGNED":
stretch_x = length / bbox.size.x
stretch_y = stretch_x
elif align == "FIT":
stretch_x = length / bbox.size.x
if stretch_x != 1.0:
matrix *= Matrix44.scale(stretch_x, stretch_y, 1.0)
return matrix
def basic_alignment_transformation(
fm: fonts.FontMeasurements, bbox: BoundingBox, halign: int, valign: int
) -> Matrix44:
if halign == const.LEFT:
shift_x = 0
elif halign == const.RIGHT:
shift_x = -bbox.extmax.x
elif halign == const.CENTER or halign > 2: # ALIGNED, MIDDLE, FIT
shift_x = -bbox.center.x
else:
raise ValueError(f"invalid halign argument: {halign}")
cap_height = fm.cap_height
descender_height = fm.descender_height
if valign == const.BASELINE:
shift_y = 0
elif valign == const.TOP:
shift_y = -cap_height
elif valign == const.MIDDLE:
shift_y = -cap_height / 2
elif valign == const.BOTTOM:
shift_y = descender_height
else:
raise ValueError(f"invalid valign argument: {valign}")
if halign == 4: # MIDDLE
shift_y = -cap_height + fm.total_height / 2.0
return Matrix44.translate(shift_x, shift_y, 0)
def make_hatches_from_str(
s: str,
font: fonts.FontFace,
size: float = 1.0,
align: str = "LEFT",
length: float = 0,
dxfattribs: Dict = None,
m: Matrix44 = None,
) -> List[Hatch]:
"""Convert a single line string `s` into a list of virtual
:class:`~ezdxf.entities.Hatch` entities.
The text `size` is the height of the uppercase letter "X" (cap height).
The paths are aligned about the insertion point at (0, 0).
The HATCH entities are aligned to this insertion point. BASELINE means the
bottom of the letter "X".
Args:
s: text to convert
font: font face definition as :class:`~ezdxf.tools.fonts.FontFace` object
size: text size (cap height) in drawing units
align: alignment as string, default is "LEFT"
length: target length for the "ALIGNED" and "FIT" alignments
dxfattribs: additional DXF attributes
m: transformation :class:`~ezdxf.math.Matrix44`
"""
# HATCH is an OCS entity, transforming just the polyline paths
# is not correct! The Hatch has to be created in the xy-plane!
paths = make_paths_from_str(s, font, size, align, length)
dxfattribs = dxfattribs or dict()
dxfattribs.setdefault("solid_fill", 1)
dxfattribs.setdefault("pattern_name", "SOLID")
dxfattribs.setdefault("color", const.BYLAYER)
hatches = path.to_hatches(paths, edge_path=True, dxfattribs=dxfattribs)
if m is not None:
# Transform HATCH entities as a unit:
return [hatch.transform(m) for hatch in hatches]
else:
return list(hatches)
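# A minimal usage sketch (not part of the original module), assuming the font
# family "Arial" can be resolved by matplotlib's font lookup: render a string
# as solid HATCH entities and add them to the modelspace of a new DXF document.
def _example_text_as_hatches():
    import ezdxf
    doc = ezdxf.new()
    msp = doc.modelspace()
    for hatch in make_hatches_from_str(
        "TEST", font=fonts.FontFace(family="Arial"), size=2.0
    ):
        msp.add_entity(hatch)
    return doc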
def check_entity_type(entity):
if entity is None:
raise TypeError("entity is None")
elif not entity.dxftype() in VALID_TYPES:
raise TypeError(f"unsupported entity type: {entity.dxftype()}")
def make_path_from_entity(entity: AnyText) -> Path:
"""Convert text content from DXF entities TEXT and ATTRIB into a
:term:`Multi-Path` object.
The paths are located at the location of the source entity.
.. versionadded:: 0.17
"""
check_entity_type(entity)
fonts.load()
text = entity.plain_text()
p = make_path_from_str(
text,
fonts.get_font_face(entity.font_name()),
size=entity.dxf.height, # cap height in drawing units
align=entity.get_align(),
length=entity.fit_length(),
)
m = entity.wcs_transformation_matrix()
return p.transform(m)
def make_paths_from_entity(entity: AnyText) -> List[Path]:
"""Convert text content from DXF entities TEXT and ATTRIB into a
list of :class:`~ezdxf.path.Path` objects. All paths are returned as a
list of :term:`Single-Path` objects.
The paths are located at the location of the source entity.
"""
return list(make_path_from_entity(entity).sub_paths())
def make_hatches_from_entity(entity: AnyText) -> List[Hatch]:
"""Convert text content from DXF entities TEXT and ATTRIB into a
list of virtual :class:`~ezdxf.entities.Hatch` entities.
The hatches are placed at the same location as the source entity and have
the same DXF attributes as the source entity.
"""
check_entity_type(entity)
extrusion = entity.dxf.extrusion
attribs = entity.graphic_properties()
paths = make_paths_from_entity(entity)
return list(
path.to_hatches(
paths,
edge_path=True,
extrusion=extrusion,
dxfattribs=attribs,
)
)
@enum.unique
class Kind(enum.IntEnum):
"""The :class:`Kind` enum defines the DXF types to create as bit flags,
e.g. 1+2 to get HATCHES as filling and SPLINES and POLYLINES as outline:
=== =========== ==============================
Int Enum Description
=== =========== ==============================
1 HATCHES :class:`~ezdxf.entities.Hatch` entities as filling
2 SPLINES :class:`~ezdxf.entities.Spline` and 3D :class:`~ezdxf.entities.Polyline`
entities as outline
4 LWPOLYLINES :class:`~ezdxf.entities.LWPolyline` entities as approximated
(flattened) outline
=== =========== ==============================
"""
HATCHES = 1
SPLINES = 2
LWPOLYLINES = 4
def virtual_entities(entity: AnyText, kind: int = Kind.HATCHES) -> EntityQuery:
"""Convert the text content of DXF entities TEXT and ATTRIB into virtual
SPLINE and 3D POLYLINE entities or approximated LWPOLYLINE entities
as outlines, or as HATCH entities as fillings.
Returns the virtual DXF entities as an :class:`~ezdxf.query.EntityQuery`
object.
Args:
entity: TEXT or ATTRIB entity
kind: kind of entities to create as bit flags, see enum :class:`Kind`
"""
check_entity_type(entity)
extrusion = entity.dxf.extrusion
attribs = entity.graphic_properties()
entities = []
if kind & Kind.HATCHES:
entities.extend(make_hatches_from_entity(entity))
if kind & (Kind.SPLINES + Kind.LWPOLYLINES):
paths = make_paths_from_entity(entity)
if kind & Kind.SPLINES:
entities.extend(
path.to_splines_and_polylines(paths, dxfattribs=attribs)
)
if kind & Kind.LWPOLYLINES:
entities.extend(
path.to_lwpolylines(
paths, extrusion=extrusion, dxfattribs=attribs
)
)
return EntityQuery(entities)
def explode(
entity: AnyText, kind: int = Kind.HATCHES, target=None
) -> EntityQuery:
"""Explode the text `entity` into virtual entities,
see :func:`virtual_entities`. The source entity will be destroyed.
The target layout is given by the `target` argument, if `target` is ``None``,
the target layout is the source layout of the text entity.
Returns the created DXF entities as an :class:`~ezdxf.query.EntityQuery`
object.
Args:
entity: TEXT or ATTRIB entity to explode
kind: kind of entities to create as bit flags, see enum :class:`Kind`
        target: target layout for the newly created DXF entities, ``None`` for the
            same layout as the source entity.
"""
entities = virtual_entities(entity, kind)
# Explicit check for None is required, because empty layouts are also False
if target is None:
target = entity.get_layout()
entity.destroy()
if target is not None:
for e in entities:
target.add_entity(e)
return EntityQuery(entities)
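# A minimal usage sketch (not part of the original module), assuming `msp` is a
# layout that already contains TEXT entities: replace every TEXT entity in the
# layout by flattened LWPOLYLINE outlines.
def _example_explode_all_text(msp):
    for text_entity in msp.query("TEXT"):
        explode(text_entity, kind=Kind.LWPOLYLINES)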
|
mit
|
flohorovicic/pygeomod
|
pygeomod/struct_data.py
|
3
|
17593
|
"""Analysis and modification of structural data exported from GeoModeller
All structural data from an entire GeoModeller project can be exported into ASCII
files using the function in the GUI:
Export -> 3D Structural Data
This method generates files for defined geological parameters:
"Points" (i.e. formation contact points) and
"Foliations" (i.e. orientations/ potential field gradients).
Exported parameters include all those defined in sections as well as 3D data points.
This package contains methods to check, visualise, and extract/modify parts of these
exported data sets, for example to import them into a different Geomodeller project.
"""
# import os, sys
import numpy as np
import numpy.random
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
class Struct3DPoints():
"""Class container for 3D structural points data sets"""
def __init__(self, **kwds):
"""Structural points data set
**Optional keywords**:
- *filename* = string : filename of csv file with exported points to load
"""
# store point information in purpose defined numpy record
self.ptype = np.dtype([('x', np.float32),
('y', np.float32),
('z', np.float32),
('formation', np.str_, 32)])
if kwds.has_key("filename"):
self.filename = kwds['filename']
# read data
self.parse()
self.get_formation_names()
self.get_range()
def parse(self):
"""Parse filename and load data into numpy record
The point information is stored in a purpose defined numpy record
self.points
"""
f = open(self.filename, "r")
lines = f.readlines()
self.header = lines[0]
# determine position of elements in header (for extension to foliations, etc.)
h_elem = np.array(self.header.rstrip().split(','))
x_id = np.where(h_elem == 'X')[0]
y_id = np.where(h_elem == 'Y')[0]
z_id = np.where(h_elem == 'Z')[0]
form_id = np.where(h_elem == 'formation')[0]
# print x_id
# create numpy array for points
self.len = (len(lines)-1)
self.points = np.ndarray(self.len, dtype = self.ptype)
for i,line in enumerate(lines[1:]):
l = line.rstrip().split(',')
self.points[i]['x'] = float(l[x_id])
self.points[i]['y'] = float(l[y_id])
self.points[i]['z'] = float(l[z_id])
self.points[i]['formation'] = l[form_id]
def get_formation_names(self):
"""Get names of all formations that have a point in this data set
and store in:
self.formation_names
"""
# self.formation_names = np.unique(self.formations)
self.formation_names = np.unique(self.points[:]['formation'])
def get_range(self):
"""Update min, max for all coordinate axes and store in
self.xmin, self.xmax, ..."""
self.xmin = np.min(self.points['x'])
self.ymin = np.min(self.points['y'])
self.zmin = np.min(self.points['z'])
self.xmax = np.max(self.points['x'])
self.ymax = np.max(self.points['y'])
self.zmax = np.max(self.points['z'])
def create_formation_subset(self, formation_names):
"""Create a subset (as another Struct3DPoints object) with specified formations only
**Arguments**:
- *formation_names* : list of formation names
**Returns**:
Struct3DPoints object with subset of points
"""
# create new object
# reference to own class type for consistency with Struct3DFoliations
pts_subset = self.__class__()
# determine ids for all points of these formations:
ids = np.ndarray((self.len), dtype='bool')
ids[:] = False
if type(formation_names) == list:
for formation in formation_names:
ids[self.points['formation'] == formation] = True
else:
ids[self.points['formation'] == formation_names] = True
# new length is identical to sum of ids bool array (all True elements)
pts_subset.len = np.sum(ids)
# extract points
pts_subset.points = self.points[ids]
# update range
pts_subset.get_range()
# update formation names
pts_subset.get_formation_names()
# get header from original
pts_subset.header = self.header
return pts_subset
def remove_formations(self, formation_names):
"""Remove points for specified formations from the point set
This function can be useful, for example, to remove one formation, perform
a thinning operation, and then add it back in with the `combine_with` function.
**Arguments**:
- *formation_names* = list of formations to be removed (or a single string to
remove only one formation)
"""
# Note: implementation is very similar to create_formation_subset, only inverse
# and changes in original point set!
# determine ids for all points of these formations:
ids = np.ndarray((self.len), dtype='bool')
ids[:] = True
if type(formation_names) == list:
for formation in formation_names:
ids[self.points['formation'] == formation] = False
else:
ids[self.points['formation'] == formation_names] = False
self.len = np.sum(ids)
# extract points
self.points = self.points[ids]
# update range
self.get_range()
# update formation names
self.get_formation_names()
def rename_formations(self, rename_dict):
"""Rename formation according to assignments in dictionary
Mapping in dictionary is of the form:
old_name_1 : new_name_1, old_name_2 : new_name_2, ...
"""
for k,v in rename_dict.items():
print("Change name from %s to %s" % (k,v))
for p in self.points:
if p['formation'] == k: p['formation'] = v
# update formation names
self.get_formation_names()
def extract_range(self, **kwds):
"""Extract subset for defined ranges
Pass ranges as keywords: from_x, to_x, from_y, to_y, from_z, to_z
        All ranges that are not defined are simply kept as before
**Returns**:
pts_subset : Struct3DPoints data subset
"""
from_x = kwds.get("from_x", self.xmin)
from_y = kwds.get("from_y", self.ymin)
from_z = kwds.get("from_z", self.zmin)
to_x = kwds.get("to_x", self.xmax)
to_y = kwds.get("to_y", self.ymax)
to_z = kwds.get("to_z", self.zmax)
# create new object
# pts_subset = Struct3DPoints()
pts_subset = self.__class__()
# determine ids for points in range
ids = np.ndarray((self.len), dtype='bool')
ids[:] = False
ids[(self.points['x'] >= from_x) *
(self.points['y'] >= from_y) *
(self.points['z'] >= from_z) *
(self.points['x'] <= to_x) *
(self.points['y'] <= to_y) *
(self.points['z'] <= to_z)] = True
# new length is identical to sum of ids bool array (all True elements)
pts_subset.len = np.sum(ids)
# extract points
pts_subset.points = self.points[ids]
# update range
pts_subset.get_range()
# update formation names
pts_subset.get_formation_names()
# get header from original
pts_subset.header = self.header
return pts_subset
def thin(self, nx, ny, nz, **kwds):
"""Thin data for one formations on grid with defined number of cells and store as subset
**Arguments**:
- *nx*, *ny*, *nz* = int : number of cells in each direction for thinning grid
The thinning is performed on a raster and not 'formation-aware',
following this simple procedure:
(1) Iterate through grid
(2) If multiple points for formation in this cell: thin
(3a) If thin: Select one point in cell at random and keep this one!
        (3b) else: if exactly one point is in range, keep it!
Note: Thinning is performed for all formations, so make sure to create a subset
for a single formation first!
**Returns**:
pts_subset = Struct3DPoints : subset with thinned data for formation
"""
# DEVNOTE: This would be an awesome function to parallelise! Should be quite simple!
# first step: generate subset
# pts_subset = self.create_formation_subset([formation])
# create new pointset:
# reference to own class type for consistency with Struct3DFoliations
pts_subset = self.__class__()
# determine cell boundaries of subset for thinning:
delx = np.ones(nx) * (self.xmax - self.xmin) / nx
bound_x = self.xmin + np.cumsum(delx)
dely = np.ones(ny) * (self.ymax - self.ymin) / ny
bound_y = self.ymin + np.cumsum(dely)
delz = np.ones(nz) * (self.zmax - self.zmin) / nz
bound_z = self.zmin + np.cumsum(delz)
ids_to_keep = []
for i in range(nx-1):
for j in range(ny-1):
for k in range(nz-1):
                    # determine number of points in this cell
ids = np.ndarray((self.len), dtype='bool')
ids[:] = False
ids[(self.points['x'] > bound_x[i]) *
(self.points['y'] > bound_y[j]) *
(self.points['z'] > bound_z[k]) *
(self.points['x'] < bound_x[i+1]) *
(self.points['y'] < bound_y[j+1]) *
(self.points['z'] < bound_z[k+1])] = True
if np.sum(ids) > 1:
# Thinning required!
# keep random point
ids_to_keep.append(numpy.random.choice(np.where(ids)[0]))
# pts_subset.points[nx * ny * i + ny * j + k] = self.points[id_to_keep]
# assign to new pointset:
elif np.sum(ids) == 1:
# keep the one point, of course!
# pts_subset.points[nx * ny * i + ny * j + k] = self.points[ids[0]]
                        # append the index of that point (not the boolean flag)
                        ids_to_keep.append(np.where(ids)[0][0])
# now get points for all those ids:
# extract points
pts_subset.points = self.points[np.array(ids_to_keep)]
# update range
pts_subset.get_range()
# update length
pts_subset.len = len(pts_subset.points)
# update formation names
pts_subset.get_formation_names()
# get header from original
pts_subset.header = self.header
return pts_subset
def combine_with(self, pts_set):
"""Combine this point set with another point set
**Arguments**:
- *pts_set* = Struct3DPoints : points set to combine
"""
self.points = np.concatenate((self.points, pts_set.points))
# update range and everything
self.get_range()
self.get_formation_names()
self.len = len(self.points)
def plot_plane(self, plane=('x','y'), **kwds):
"""Create 2-D plots for point distribution
**Arguments**:
- *plane* = tuple of plane axes directions, e.g. ('x','y') (default)
**Optional Keywords**:
- *ax* = matplotlib axis object: if provided, plot is attached to this axis
- *formation_names* = list of formations : plot only points for specific formations
"""
color = kwds.get("color", 'b')
if kwds.has_key("ax"):
# axis is provided, attach here
ax = kwds['ax']
else:
fig = plt.figure()
ax = fig.add_subplot(111)
if kwds.has_key("formation_names"):
pts_subset = self.create_formation_subset(kwds['formation_names'])
ax.plot(pts_subset.points[:][plane[0]], pts_subset.points[:][plane[1]], '.', color = color)
else:
ax.plot(self.points[:][plane[0]], self.points[:][plane[1]], '.', color = color)
def plot_3D(self, **kwds):
"""Create a plot of points in 3-D
**Optional keywords**:
- *ax* = matplotlib axis object: if provided, plot is attached to this axis
- *formation_names* = list of formations : plot only points for specific formations
"""
if kwds.has_key("ax"):
# axis is provided, attach here
ax = kwds['ax']
else:
fig = plt.figure(figsize = (10,8))
ax = fig.add_subplot(111, projection='3d')
if kwds.has_key("formation_names"):
            # create a subset with the specified formations only
pts_subset = self.create_formation_subset(kwds['formation_names'])
pts_subset.plot_3D(ax = ax)
else:
# plot all
ax.scatter(self.points['x'], self.points['y'], self.points['z'])
def save(self, filename):
"""Save points set to file
**Arguments**:
- *filename* = string : name of new file
"""
f = open(filename, 'w')
f.write(self.header)
for point in self.points:
f.write("%.2f,%.2f,%.3f,%s\n" % (point['x'], point['y'], point['z'], point['formation']))
f.close()
class Struct3DFoliations(Struct3DPoints):
"""Class container for foliations (i.e. orientations) exported from GeoModeller
    Mainly based on Struct3DPoints, as most of the required functionality
    for handling the location of elements is identical; some functions are
    overwritten, e.g. save and parse, to read and write orientation data as well.
However, further methods might be added or adapted in the future, for example:
- downsampling according to (eigen)vector methods, e.g. the work from the Monash guys, etc.
    - plotting of orientations in 2-D and 3-D
"""
def __init__(self, **kwds):
"""Structural points data set
**Optional keywords**:
- *filename* = string : filename of csv file with exported points to load
"""
# store point information in purpose defined numpy record
self.ftype = np.dtype([('x', np.float32),
('y', np.float32),
('z', np.float32),
('azimuth', np.float32),
('dip', np.float32),
('polarity', np.int),
('formation', np.str_, 32)])
if kwds.has_key("filename"):
self.filename = kwds['filename']
# read data
self.parse()
self.get_formation_names()
self.get_range()
def parse(self):
"""Parse filename and load data into numpy record
The point information is stored in a purpose defined numpy record
self.points
"""
f = open(self.filename, "r")
lines = f.readlines()
self.header = lines[0]
# determine position of elements in header (for extension to foliations, etc.)
h_elem = np.array(self.header.rstrip().split(','))
x_id = np.where(h_elem == 'X')[0]
y_id = np.where(h_elem == 'Y')[0]
z_id = np.where(h_elem == 'Z')[0]
azi_id = np.where(h_elem == 'azimuth')[0]
dip_id = np.where(h_elem == 'dip')[0]
pol_id = np.where(h_elem == 'polarity')[0]
form_id = np.where(h_elem == 'formation')[0]
# print x_id
# create numpy array for points
self.len = (len(lines)-1)
self.points = np.ndarray(self.len, dtype = self.ftype)
for i,line in enumerate(lines[1:]):
l = line.rstrip().split(',')
self.points[i]['x'] = float(l[x_id])
self.points[i]['y'] = float(l[y_id])
self.points[i]['z'] = float(l[z_id])
self.points[i]['azimuth'] = float(l[azi_id])
self.points[i]['dip'] = float(l[dip_id])
self.points[i]['polarity'] = float(l[pol_id])
self.points[i]['formation'] = l[form_id]
def save(self, filename):
"""Save points set to file
**Arguments**:
- *filename* = string : name of new file
"""
f = open(filename, 'w')
f.write(self.header)
for point in self.points:
f.write("%.2f,%.2f,%.3f,%.3f,%.3f,%d,%s\n" % (point['x'], point['y'], point['z'],
point['azimuth'], point['dip'], point['polarity'],
point['formation']))
f.close()
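# A minimal usage sketch (not part of the original module), assuming
# 'points.csv' is a GeoModeller "Points" export with X, Y, Z and formation
# columns, and 'Sandstone' is one of the exported formation names:
# load, subset, thin, and save the points.
def _example_workflow():
    pts = Struct3DPoints(filename='points.csv')
    sandstone = pts.create_formation_subset(['Sandstone'])
    thinned = sandstone.thin(10, 10, 5)
    thinned.save('points_sandstone_thinned.csv')
    return thinned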
if __name__ == '__main__':
pass
|
mit
|
BhallaLab/moose-core
|
python/moose/helper.py
|
4
|
2432
|
"""helper.py:
Some helper functions which are compatible with both python2 and python3.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2017-, Dilawar Singh"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "[email protected]"
__status__ = "Development"
import os
import re
import subprocess
def execute(cmd):
"""execute: Execute a given command.
:param cmd: string, given command.
Return:
------
    Return an iterator over the output lines.
"""
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
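# A small usage sketch (not part of the original helpers): stream a command's
# output line by line as it is produced. The command ['echo', 'hello'] is only
# an illustrative assumption; any argv-style list works.
def _demo_execute():
    import sys
    for line in execute(['echo', 'hello']):
        sys.stdout.write(line)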
def find_files( dirname, ext=None, name_contains=None, text_regex_search=None):
files = []
for d, sd, fs in os.walk(dirname):
for f in fs:
fpath = os.path.join(d,f)
include = True
if ext is not None:
if f.split('.')[-1] != ext:
include = False
if name_contains:
if name_contains not in os.path.basename(f):
include = False
if text_regex_search:
with open(fpath, 'r' ) as f:
txt = f.read()
if re.search(text_regex_search, txt) is None:
include = False
if include:
files.append(fpath)
return files
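# A small usage sketch (not part of the original helpers): list all '.py' files
# below the current directory whose source mentions 'subprocess'. The directory
# and the search pattern are illustrative assumptions.
def _demo_find_files():
    for fpath in find_files('.', ext='py', text_regex_search=r'subprocess'):
        print(fpath)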
# Matplotlib text for running simulation. It makes sure that each figure is
# saved to an individual png file.
matplotlibText = """
print( '>>>> saving all figues')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def multipage(filename, figs=None, dpi=200):
pp = PdfPages(filename)
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for fig in figs:
fig.savefig(pp, format='pdf')
pp.close()
def saveall(prefix='results', figs=None):
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for i, fig in enumerate(figs):
outfile = '%s.%d.png' % (prefix, i)
fig.savefig(outfile)
print( '>>>> %s saved.' % outfile )
plt.close()
try:
saveall()
except Exception as e:
print( '>>>> Error in saving: %s' % e )
quit(0)
"""
|
gpl-3.0
|
dseuss/pythonlibs
|
tools/plot.py
|
1
|
3843
|
#!/usr/bin/env python
# encoding: utf-8
"""Tools making everyday plotting tasks easier."""
from __future__ import division, print_function
from math import ceil
import numpy as np
from matplotlib import pyplot as pl
from mpl_toolkits.axes_grid1 import make_axes_locatable, ImageGrid
def plot(function, intervall, num=500, axis=None, **kwargs):
"""Plots the function f on the axisis axis on the intervall (xmin, xmaxis)
:param function: Functions or list of function to plot
:param intervall: Intervall to plot function on (xmin, xmaxis)
:param num: Number of points used for the plot (default 500)
:param axis: Axis to plot on (default current axisis)
:returns: Plot (or list of plots)
"""
if hasattr(function, '__iter__'):
return [plot(f, intervall, num, axis, **kwargs) for f in function]
else:
x = np.linspace(*intervall, num=num)
axis = pl.gca() if axis is None else axis
return axis.plot(x, function(x), **kwargs)
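# A small usage sketch (not part of the original module): plot sin and cos on
# the interval [0, 2*pi] into the current axis and display the figure.
def _demo_plot():
    plot([np.sin, np.cos], (0, 2 * np.pi), num=200)
    pl.show()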
def _imshow_formater(arr):
"""Creates a formating function to show the values of imshow in the status
bar.
:param arr: Array to be shown
:returns: function formater(x, y) that should be set to ax.format_coord
"""
def format_coord(x, y):
col, row = int(x + .5), int(y + .5)
if (col >= 0) and (col < arr.shape[1]) \
and (row >= 0) and (row < arr.shape[0]):
return "x={}, y={}, val={}".format(col, row, arr[row, col])
else:
return "x={}, y={}".format(col, row)
return format_coord
def imshow(img, fig=None, **kwargs):
"""Shows the image `img` passed as numpy array in a much prettier way
:param np.ndarray img: Image to show passed as RGB or grayscale image
    :param fig: Figure to use for the plot (default: current figure)
:param kwargs: Keyword arguments passed to imshow
"""
assert 'interpolation' not in kwargs
fig = fig if fig is not None else pl.gcf()
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
ax.imshow(img, interpolation='nearest', **kwargs)
return fig
def imsshow(images, grid=None, **kwargs):
if grid is None:
grid = (min(len(images), 5), -1)
assert any(g > 0 for g in grid)
grid_x = grid[0] if grid[0] > 0 else ceil(len(images) / grid[1])
grid_y = grid[1] if grid[1] > 0 else ceil(len(images) / grid[0])
axes = ImageGrid(pl.gcf(), "111", (grid_y, grid_x), share_all=True)
for ax, img in zip(axes, images):
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.imshow(img, **kwargs)
return axes
def rgb2gray(img):
"""Converts a RGB encoded image to grayscale
:param img: n-dim Array, with the last dimension of size 3 encoding RGB
:returns: (n-1) dim Array of shape img.shape[:-1]
"""
return np.dot(img, (0.2989, 0.5870, 0.1140))
def matshow(mat, ax=None, show=True, **kwargs):
"""Shows the real matrix mat as img -- similar to imshow, but with
different defaults
    :param np.ndarray mat: Real matrix to show
:param ax: Axis to use for plot (default: current axis)
:param bool show: Whether to call pl.show() afterwards
:param kwargs: Keyword arguments passed to imshow
"""
if ax is None:
ax = pl.gca()
ax.grid(False)
# ax.set_xticklabels([])
# ax.set_yticklabels([])
res = ax.imshow(mat, interpolation='nearest', **kwargs)
ax.axis((-.5, mat.shape[1] - .5, mat.shape[0] - .5, -.5))
ax.format_coord = _imshow_formater(mat)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cb = pl.colorbar(res, cax=cax)
cb.ax.tick_params(axis='y', direction='out')
if show:
pl.show()
return res
|
unlicense
|
Srisai85/scikit-learn
|
examples/feature_selection/plot_rfe_with_cross_validation.py
|
226
|
1384
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
|
bsd-3-clause
|
MechCoder/scikit-learn
|
examples/preprocessing/plot_function_transformer.py
|
158
|
1993
|
"""
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
lw = 0
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
plt.figure()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
lw=lw,
s=60
)
plt.show()
|
bsd-3-clause
|
ofgulban/scikit-image
|
skimage/io/tests/test_plugin.py
|
11
|
3427
|
from contextlib import contextmanager
from numpy.testing import assert_equal, raises
from skimage import io
from skimage.io import manage_plugins
io.use_plugin('pil')
priority_plugin = 'pil'
def setup_module():
manage_plugins.use_plugin('test') # see ../_plugins/test_plugin.py
def teardown_module():
io.reset_plugins()
@contextmanager
def protect_preferred_plugins():
"""Contexts where `preferred_plugins` can be modified w/o side-effects."""
preferred_plugins = manage_plugins.preferred_plugins.copy()
try:
yield
finally:
manage_plugins.preferred_plugins = preferred_plugins
def test_read():
io.imread('test.png', as_grey=True, dtype='i4', plugin='test')
def test_save():
io.imsave('test.png', [1, 2, 3], plugin='test')
def test_show():
io.imshow([1, 2, 3], plugin_arg=(1, 2), plugin='test')
def test_collection():
ic = io.imread_collection('*.png', conserve_memory=False, plugin='test')
io.imshow_collection(ic)
def test_use():
manage_plugins.use_plugin('test')
manage_plugins.use_plugin('test', 'imshow')
@raises(ValueError)
def test_failed_use():
manage_plugins.use_plugin('asd')
def test_use_priority():
manage_plugins.use_plugin(priority_plugin)
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, priority_plugin)
manage_plugins.use_plugin('test')
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, 'test')
def test_use_priority_with_func():
manage_plugins.use_plugin('pil')
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, 'pil')
manage_plugins.use_plugin('test', 'imread')
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, 'test')
plug, func = manage_plugins.plugin_store['imsave'][0]
assert_equal(plug, 'pil')
manage_plugins.use_plugin('test')
plug, func = manage_plugins.plugin_store['imsave'][0]
assert_equal(plug, 'test')
def test_plugin_order():
p = io.plugin_order()
assert 'imread' in p
assert 'test' in p['imread']
def test_available():
assert 'qt' in io.available_plugins
assert 'test' in io.find_available_plugins(loaded=True)
def test_load_preferred_plugins_all():
from skimage.io._plugins import pil_plugin, matplotlib_plugin
with protect_preferred_plugins():
manage_plugins.preferred_plugins = {'all': ['pil'],
'imshow': ['matplotlib']}
manage_plugins.reset_plugins()
for plugin_type in ('imread', 'imsave'):
plug, func = manage_plugins.plugin_store[plugin_type][0]
assert func == getattr(pil_plugin, plugin_type)
plug, func = manage_plugins.plugin_store['imshow'][0]
assert func == getattr(matplotlib_plugin, 'imshow')
def test_load_preferred_plugins_imread():
from skimage.io._plugins import pil_plugin, matplotlib_plugin
with protect_preferred_plugins():
manage_plugins.preferred_plugins['imread'] = ['pil']
manage_plugins.reset_plugins()
plug, func = manage_plugins.plugin_store['imread'][0]
assert func == pil_plugin.imread
plug, func = manage_plugins.plugin_store['imshow'][0]
assert func == matplotlib_plugin.imshow, func.__module__
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
bsd-3-clause
|
russel1237/scikit-learn
|
benchmarks/bench_plot_incremental_pca.py
|
374
|
6430
|
"""
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
|
bsd-3-clause
|
madjelan/scikit-learn
|
sklearn/tests/test_pipeline.py
|
162
|
14875
|
"""
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
|
bsd-3-clause
|
OlympusMonds/PTA
|
public_transport_analyser/database/db_analyser.py
|
1
|
7337
|
"""
Mostly for debugging for now, this script uses matplotlib to plot origins,
destinations, and their links. Eventually it will plot the ratio of driving
vs. transit for various times. It is also a prototype for the website that
will no double come later.
"""
import sys
import os
import pony.orm as pny
from public_transport_analyser.database.database import Origin, Destination, Trip, init
def mess():
with pny.db_session:
o = Origin.select_random(limit=1)[0]
print(o.location)
def count_origins():
with pny.db_session:
origins = pny.select(pny.count(o) for o in Origin).first()
return origins
def count_destinations():
with pny.db_session:
destinations = pny.select(pny.count(d) for d in Destination).first()
return destinations
def count_trips():
with pny.db_session:
trips = pny.select(pny.count(t) for t in Trip).first()
return trips
def delete_bad_routes():
num_bad_origins = 0
with pny.db_session:
origins = pny.select(o for o in Origin)[:]
for o in origins:
if len(o.destinations) <= 1:
num_bad_origins += 1
for d in o.destinations:
for t in d.trips:
t.delete()
d.delete()
o.delete()
return num_bad_origins
def count_bad_routes():
num_bad_origins = 0
with pny.db_session:
origins = pny.select(o for o in Origin)[:]
for o in origins:
if len(o.destinations) <= 1:
num_bad_origins += 1
return num_bad_origins
def origin_bench():
lonlats = []
with pny.db_session:
#origins = pny.select(o for o in Origin)[:]
origins = pny.select((o.location, pny.count(o.destinations)) for o in Origin)[:]
#for o in origins:
# lat, lon = map(float, o.location.split(","))
# lonlats.append((lon, lat, len(o.destinations)))
print(origins)
for i in origins:
print(i)#, len(i.destinations))
def origin_stats():
max_dests = -1
min_dests = 1e6
avg_dests = 0
count = 0
max_route = None
with pny.db_session:
origins = pny.select(o for o in Origin)[:]
for o in origins:
num_dests = len(o.destinations)
if num_dests > max_dests:
max_dests = num_dests
max_route = "{}".format(o.location)
min_dests = min(min_dests, num_dests)
avg_dests += num_dests
count += 1
return max_dests, min_dests, avg_dests/float(count), max_route
def avg_ratio():
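    # For every origin-destination pair, compare the driving duration against
    # the mean transit duration; ratio = driving / mean(transit), so values
    # below 1 mean driving is faster than public transport on that route.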
ratios = []
with pny.db_session:
origins = pny.select(o for o in Origin)[:]
for o in origins:
for d in o.destinations:
pt_avg = []
driving = None
for t in d.trips:
if t.mode == "transit":
pt_avg.append(float(t.duration))
else:
driving = float(t.duration)
if pt_avg and driving:
pt_avg = sum(pt_avg) / len(pt_avg)
if pt_avg != 0:
ratios.append(driving / pt_avg)
else:
print(" error: route {}_{} has an average transit duration of 0".format(o, d))
for t in d.trips:
print(" mode: {}, duration: {}, distance: {}".format(t.mode, t.duration, t.distance))
return ratios
def route_stats():
max_trips = -1
min_trips = 1e6
avg_trips = 0
count = 0
max_route = None
min_route = None
with pny.db_session:
origins = pny.select(o for o in Origin)[:]
for o in origins:
for d in o.destinations:
num_trips = len(d.trips)
if num_trips > max_trips:
max_trips = num_trips
max_route = "{}_{}".format(o.location, d.location)
if num_trips < min_trips:
min_trips = num_trips
min_route = "{}_{}".format(o.location, d.location)
avg_trips += num_trips
count += 1
return max_trips, min_trips, avg_trips/float(count), max_route, min_route
def count_each_origins_destinations():
with pny.db_session:
#oc = pny.select((o, pny.avg(t.duration)) for o in Origin
## for d in o.destinations
# for t in d.trips if t.mode == "transit")[:]
#dc = pny.select((d.origin, pny.avg(t.duration)) for d in Destination
# for t in d.trips if t.mode == "transit").without_distinct()[:]
#dc2 = pny.select((d.id, d.origin, pny.avg(t.duration)) for d in Destination
# for t in d.trips if t.mode == "transit")[:]
origins = pny.select((
d.origin.location,
pny.avg(t.duration for t in d.trips if t.mode == "transit"),
pny.avg(t.duration for t in d.trips if t.mode == "driving")
)
for d in Destination)[:]
print(len(origins))
origins = pny.select((
o.location,
pny.avg(t.duration for d in o.destinations for t in d.trips if t.mode == "driving"),
pny.avg(t.duration for d in o.destinations for t in d.trips if t.mode == "transit")
)
for o in Origin
)[:]
print(len(origins))
# pny.select((d.id, d.origin, pny.avg(t.duration))
# for t in d.trips if t.mode == "transit"),
#
# pny.select((d.id, d.origin, t.duration)
# for t in d.trips if t.mode == "driving")
#print(len(oc))
#print(len(dc))
#print(len(dc2))
print("Query")
fails = []
for i, o in enumerate(origins):
print(i, o)
print("\n".join(map(str, fails)))
# for a, b in sorted(dc, key=lambda x: x[0]):
# print(a)
def analyser():
count_each_origins_destinations()
#mess()
"""
print("Number of origins: {}".format(count_origins()))
print("Number of destinations: {}".format(count_destinations()))
print("Number of trips: {}".format(count_trips()))
print("Number of bad origins: {}".format(count_bad_routes()))
max_dests, min_dests, avg_dests, max_route = origin_stats()
print("Max destinations on a route: {} ({})".format(max_dests, max_route))
print("Min destinations on a route: {}".format(min_dests))
print("Avg destinations on a route: {}".format(avg_dests))
ratios = avg_ratio()
print("Avg ratio for all routes: {}".format(sum(ratios) / len(ratios)))
delete_bad_routes()
"""
"""
max_trips, min_trips, avg_trips, max_route, min_route = route_stats()
print("Max trips on a route: {} ({})".format(max_trips, max_route))
print("Min trips on a route: {} ({})".format(min_trips, min_route))
print("Avg trips on a route: {}".format(avg_trips))
"""
if __name__ == "__main__":
init()
sys.exit(analyser())
|
gpl-3.0
|
ssaeger/scikit-learn
|
examples/svm/plot_svm_nonlinear.py
|
268
|
1091
|
"""
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='dashed')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
|
bsd-3-clause
|
binghongcha08/pyQMD
|
QMC/MC_exchange/permute4d/dissipation/2.5/en.py
|
15
|
1291
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
#data = np.genfromtxt(fname='/home/bing/dissipation/energy.dat')
data = np.genfromtxt(fname='energy.dat')
fig, (ax1,ax2) = plt.subplots(ncols=1, nrows=2, sharex=True)
#font = {'family' : 'ubuntu',
# 'weight' : 'normal',
# 'size' : '16'}
#mpl.rc('font', **font) # pass in the font dict as kwargs
mpl.rcParams['font.size'] = 12
#mpl.rcParams['figure.figsize'] = 8,6
#pl.title('two-steps fitting alg')
ax1.set_ylabel('Energy [hartree]')
ax1.plot(data[:,0],data[:,2],'b--',linewidth=2,label='Potential')
#pl.plot(dat[:,0],dat[:,2],'r-',linewidth=2)
ax1.plot(data[:,0],data[:,3],'g-.',linewidth=2,label='Quantum Potential')
ax1.plot(data[:,0],data[:,4],'k-',linewidth=2,label='Energy')
#pl.legend(bbox_to_anchor=(0.5, 0.38, 0.42, .302), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#ax1.set_yticks((0.4,0.6,0.8))
ax1.legend(loc=0)
#ax1.set_ylim(0,5)
ax2.set_xlabel('time [a.u.]')
ax2.set_ylabel('Energy [hartree]')
ax2.plot(data[:,0],data[:,1],'r--',linewidth=2,label='$Kinetic$')
#pl.plot(dat[:,0],dat[:,1],'k-',linewidth=2)
ax2.set_yscale('log')
#ax2.set_xticks((0,4,8))
#ax2.set_yticks((1e-7,1e-5,1e-3))
plt.legend(loc=0)
plt.subplots_adjust(hspace=0.)
plt.show()
|
gpl-3.0
|
BigUtrecht/BigUtrecht
|
analysis/budget.py
|
1
|
2474
|
from sys import maxsize
import matplotlib.pyplot as plt
from pyspark.sql import functions as F
from pyspark.sql.window import Window
from constants.spark import *
from etl import parquet
def createOverallBudget():
"""
Overall budget analysis function
Calculates the overall budget
:return: None
"""
with Session() as spark:
flow = parquet.readResults(spark, "flow")
flow.registerTempTable('flow')
globalflow = spark.sql(
"SELECT Timestamp, max(Datum) Datum, max(Tijd) Tijd, sum(Flow) Flow, sum(Volume) Volume FROM flow "
"GROUP BY Timestamp ORDER BY Timestamp")
budget = globalflow.select(globalflow.Timestamp, globalflow.Datum, globalflow.Tijd, globalflow.Flow,
globalflow.Volume,
F.sum(globalflow.Flow).over(
Window.orderBy("Timestamp").rowsBetween(-maxsize, 0)).alias("Budget"))
parquet.saveResults(spark, budget, 'overallbudget')
def createDailyBudget():
"""
Daily budget analysis function
Calculates the daily budget
:return: None
"""
with Session() as spark:
flow = parquet.readResults(spark, "flow")
flow.registerTempTable('flow')
globalflow = spark.sql(
"SELECT Timestamp, max(Datum) Datum, max(Tijd) Tijd, sum(Flow) Flow, sum(Volume) Volume FROM flow "
"GROUP BY Timestamp ORDER BY Timestamp")
budget = globalflow.select(globalflow.Timestamp, globalflow.Datum, globalflow.Tijd, globalflow.Flow,
globalflow.Volume,
F.sum(globalflow.Flow).over(
Window.partitionBy("Datum").orderBy("Timestamp").rowsBetween(-maxsize, 0)).alias(
"Budget"))
parquet.saveResults(spark, budget, 'budget')
if __name__ == '__main__':
# createOverallBudget()
# createDailyBudget()
with Session() as spark:
budget = parquet.readResults(spark, 'budget')
budget.registerTempTable('budget')
pdbudget = spark.sql(
"SELECT Tijd, avg(Flow) Flow, min(Flow) MinFlow, max(Flow) MaxFlow, avg(Budget) Budget FROM budget "
"GROUP BY Tijd ORDER BY max(Timestamp)").toPandas()
pdbudget.plot(x="Tijd", y=["Flow", "MinFlow", "MaxFlow"], kind='line', color=["green", "red", "blue"])
plt.show()
|
gpl-3.0
|
r-mart/scikit-learn
|
examples/tree/plot_tree_regression_multioutput.py
|
206
|
1800
|
"""
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
A :ref:`decision tree <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data, i.e. it overfits the noise.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
|
bsd-3-clause
|
bundgus/python-playground
|
matplotlib-playground/examples/pylab_examples/usetex_baseline_test.py
|
1
|
2026
|
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.axes as maxes
from matplotlib import rcParams
rcParams['text.usetex'] = True
rcParams['text.latex.unicode'] = True
class Axes(maxes.Axes):
"""
A hackish way to simultaneously draw texts w/ usetex=True and
usetex=False in the same figure. It does not work in the ps backend.
"""
def __init__(self, *kl, **kw):
        self.usetex = kw.pop("usetex", False)
        self.preview = kw.pop("preview", False)
maxes.Axes.__init__(self, *kl, **kw)
def draw(self, renderer):
usetex = plt.rcParams["text.usetex"]
preview = plt.rcParams["text.latex.preview"]
plt.rcParams["text.usetex"] = self.usetex
plt.rcParams["text.latex.preview"] = self.preview
maxes.Axes.draw(self, renderer)
plt.rcParams["text.usetex"] = usetex
plt.rcParams["text.latex.preview"] = preview
subplot = maxes.subplot_class_factory(Axes)
def test_window_extent(ax, usetex, preview):
va = "baseline"
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
#t = ax.text(0., 0., r"mlp", va="baseline", size=150)
text_kw = dict(va=va,
size=50,
bbox=dict(pad=0., ec="k", fc="none"))
test_strings = ["lg", r"$\frac{1}{2}\pi$",
r"$p^{3^A}$", r"$p_{3_2}$"]
ax.axvline(0, color="r")
for i, s in enumerate(test_strings):
ax.axhline(i, color="r")
ax.text(0., 3 - i, s, **text_kw)
ax.set_xlim(-0.1, 1.1)
ax.set_ylim(-.8, 3.9)
ax.set_title("usetex=%s\npreview=%s" % (str(usetex), str(preview)))
fig = plt.figure(figsize=(2.*3, 6.5))
for i, usetex, preview in [[0, False, False],
[1, True, False],
[2, True, True]]:
ax = subplot(fig, 1, 3, i + 1, usetex=usetex, preview=preview)
fig.add_subplot(ax)
fig.subplots_adjust(top=0.85)
test_window_extent(ax, usetex=usetex, preview=preview)
plt.draw()
plt.show()
|
mit
|
tlhallock/line-search-dfo
|
python/test/perf_plots/read_perf_plot_data.py
|
1
|
1345
|
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import pickle
import re
filePattern = re.compile(r'([^_]*)_([0-9]*)\.p')
algos = {}
# point this at the output directory
for root, subFolders, files in os.walk('./runtimes/'):
for file in files:
m = filePattern.match(file)
if not m:
continue
algo = m.group(1)
nprob = m.group(2)
with open(os.path.join(root, file), "rb") as input:
if nprob not in algos:
algos[nprob] = {}
data = pickle.load(input)
algos[nprob][algo] = data
probNdx = [-1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, 14, 15, 16, 17, -1, 19, 20, 21]
algoIdx = {
'mine': 1,
'pyOpt': 2
}
with open('../../octave/assign_to_matlab.m', 'w') as log:
log.write('h = zeros(0,0,0);')
for nprob in algos.keys():
print(nprob)
for algo in algos[nprob].keys():
nfev = algos[nprob][algo]['nfev']
fvals = algos[nprob][algo]['fvals']
print('\t' + str(algo) + " " + str(nfev))
for i in range(len(fvals)):
log.write('h(' + str(i + 1) + ', ' + str(probNdx[int(nprob)]) + ', ' + str(algoIdx[algo]) + ') = ' + str(fvals[i][0]) + ';\n')
xs = np.asarray(range(nfev))
ys = np.zeros(nfev)
for i in range(nfev):
ys[i] = fvals[i][0]
plt.plot(xs, ys, label=algo)
plt.legend(loc='upper right')
plt.savefig('plots/' + nprob + '_performance.png')
plt.close()
|
gpl-3.0
|
akionakamura/scikit-learn
|
examples/ensemble/plot_voting_probas.py
|
316
|
2824
|
"""
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
|
bsd-3-clause
|
rajat1994/scikit-learn
|
sklearn/utils/random.py
|
234
|
10510
|
# Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given, the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
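            # Inverse-transform sampling: draw uniform variates and map them
            # through the cumulative distribution with searchsorted.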
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
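# --- Hedged usage sketch (illustration only; not part of the sklearn module) ---
# A minimal example of random_choice_csc as defined above: two output columns,
# each with its own integer class set and class distribution. The concrete
# classes, probabilities and sample count below are made up for illustration.
if __name__ == "__main__":
    example = random_choice_csc(
        n_samples=10,
        classes=[np.array([0, 1]), np.array([0, 2, 3])],
        class_probability=[np.array([0.5, 0.5]),
                           np.array([0.5, 0.25, 0.25])],
        random_state=0)
    print(example.toarray())  # dense view of the (10, 2) sparse draw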
|
bsd-3-clause
|
JsNoNo/scikit-learn
|
examples/cluster/plot_lena_segmentation.py
|
271
|
2444
|
"""
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi tessellation.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
    plt.contour(labels == l,
                colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
|
bsd-3-clause
|
rmcgibbo/osprey
|
osprey/dataset_loaders.py
|
1
|
4548
|
from __future__ import print_function, absolute_import, division
import glob
import os
import numpy as np
from .utils import expand_path
class BaseDatasetLoader(object):
short_name = None
def load(self):
raise NotImplementedError('should be implemented in subclass')
class MSMBuilderDatasetLoader(BaseDatasetLoader):
short_name = 'msmbuilder'
def __init__(self, path, fmt=None, verbose=False):
self.path = path
self.fmt = fmt
self.verbose = verbose
def load(self):
from msmbuilder.dataset import dataset
ds = dataset(self.path, mode='r', fmt=self.fmt, verbose=self.verbose)
print('Dataset provenance:\n')
print(ds.provenance)
return ds, None
class NumpyDatasetLoader(BaseDatasetLoader):
short_name = 'numpy'
def __init__(self, filenames):
self.filenames = filenames
def load(self):
filenames = sorted(glob.glob(expand_path(self.filenames)))
if len(filenames) == 0:
raise RuntimeError('no filenames matched by pattern: %s' %
self.filenames)
ds = [np.load(f) for f in filenames]
return ds, None
class MDTrajDatasetLoader(BaseDatasetLoader):
short_name = 'mdtraj'
def __init__(self, trajectories, topology=None, stride=1, verbose=False):
self.trajectories = trajectories
self.topology = topology
self.stride = stride
self.verbose = verbose
def load(self):
import mdtraj
filenames = sorted(glob.glob(expand_path(self.trajectories)))
if len(filenames) == 0:
raise RuntimeError('no filenames matched by pattern: %s' %
self.trajectories)
top = self.topology
kwargs = {}
if top is not None:
top = expand_path(self.topology)
kwargs = {'top': top}
X = []
y = None
for fn in filenames:
if self.verbose:
print('[mdtraj] loading %s' % fn)
X.append(mdtraj.load(fn, stride=self.stride, **kwargs))
return X, y
class FilenameDatasetLoader(BaseDatasetLoader):
"""Just pass a bunch of filenames to the first step of the pipeline
The pipeline will do the loading.
"""
short_name = 'filename'
def __init__(self, trajectories, abs_path=True):
self.traj_glob = trajectories
self.abs_path = abs_path
def load(self):
filenames = sorted(glob.glob(expand_path(self.traj_glob)))
if len(filenames) == 0:
raise RuntimeError('no filenames matched by pattern: %s' %
self.traj_glob)
if self.abs_path:
filenames = [os.path.abspath(fn) for fn in filenames]
return filenames, None
class JoblibDatasetLoader(BaseDatasetLoader):
short_name = 'joblib'
def __init__(self, filenames, x_name=None, y_name=None,
system_joblib=False):
self.filenames = filenames
self.x_name = x_name
self.y_name = y_name
self.system_joblib = system_joblib
def load(self):
if self.system_joblib:
import joblib
else:
from sklearn.externals import joblib
X, y = [], []
filenames = sorted(glob.glob(expand_path(self.filenames)))
if len(filenames) == 0:
raise RuntimeError('no filenames matched by pattern: %s' %
self.filenames)
for fn in filenames:
obj = joblib.load(fn)
if isinstance(obj, (list, np.ndarray)):
X.append(obj)
else:
X.append(obj[self.x_name])
y.append(obj[self.y_name])
if len(X) == 1:
X = X[0]
if len(y) == 1:
y = y[0]
elif len(y) == 0:
y = None
return X, y
class SklearnDatasetLoader(BaseDatasetLoader):
short_name = 'sklearn_dataset'
def __init__(self, method, x_name='data', y_name='target', **kwargs):
self.method = method
self.x_name = x_name
self.y_name = y_name
self.kwargs = kwargs
def load(self):
import sklearn.datasets
try:
loader = getattr(sklearn.datasets, self.method)
except AttributeError:
raise RuntimeError('no %s in sklearn.datasets' % self.method)
bunch = loader(**self.kwargs)
X = bunch[self.x_name]
y = bunch[self.y_name]
return X, y
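# --- Hedged usage sketch (illustration only; not part of the original module) ---
# Shows how one of the loaders above might be wired up. The glob pattern
# "./data/traj-*.npy" is hypothetical; only the NumpyDatasetLoader interface
# comes from this file, and the sketch assumes matching .npy files exist.
if __name__ == "__main__":
    loader = NumpyDatasetLoader("./data/traj-*.npy")
    X, y = loader.load()  # y is always None for this loader
    print(len(X), "arrays loaded")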
|
apache-2.0
|
wllmtrng/ggplot
|
ggplot/geoms/geom_bar.py
|
1
|
3061
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import matplotlib.cbook as cbook
from .geom import geom
from ggplot.utils import is_string
from ggplot.utils import is_categorical
class geom_bar(geom):
DEFAULT_AES = {'alpha': None, 'color': None, 'fill': '#333333',
'linetype': 'solid', 'size': 1.0, 'weight': None, 'y': None, 'width' : None}
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'stat': 'bin', 'position': 'stack'}
_extra_requires = {'y', 'width'}
_aes_renames = {'linetype': 'linestyle', 'size': 'linewidth',
'fill': 'color', 'color': 'edgecolor'}
# NOTE: Currently, geom_bar does not support mapping
# to alpha and linestyle. TODO: raise exception
_units = {'edgecolor', 'color', 'alpha', 'linestyle', 'linewidth'}
def __init__(self, *args, **kwargs):
# TODO: Change self.__class__ to geom_bar
super(geom_bar, self).__init__(*args, **kwargs)
self.bottom = None
self.ax = None
def _plot_unit(self, pinfo, ax):
categorical = is_categorical(pinfo['x'])
pinfo.pop('weight')
x = pinfo.pop('x')
width_elem = pinfo.pop('width')
# If width is unspecified, default is an array of 1's
        if width_elem is None:
            width = np.ones(len(x))
        else:
            width = np.array(width_elem)
# Make sure bottom is initialized and get heights. If we are working on
# a new plot (using facet_wrap or grid), then reset bottom
        _reset = self.bottom is None or (self.ax is not None and self.ax != ax)
self.bottom = np.zeros(len(x)) if _reset else self.bottom
self.ax = ax
heights = np.array(pinfo.pop('y'))
        # layout and spacing
        #
        # matplotlib needs the left of each bin and its width
        # if x has numeric values then:
        #   - left = x - width/2
        # otherwise x is categorical:
        #   - left = cumulative width of previous bins starting
        #     at zero for the first bin
        #
        # then add a uniform gap between each bin
        #   - the gap is a fraction of the width of the first bin
        #     and only applies when x is categorical
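        #
        # small worked example (categorical case, three bins of width 1 each):
        #   _breaks[:-1] = [0, 1, 1] -> left = cumsum = [0, 1, 2]
        #   _sep = 1 * 0.105 and _left_gap = 0.2, so
        #   left = [0 + 0.2 + 0, 1 + 0.2 + 0.105, 2 + 0.2 + 0.21]
        #        = [0.2, 1.305, 2.41]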
_left_gap = 0
_spacing_factor = 0 # of the bin width
if not categorical:
left = np.array([x[i]-width[i]/2 for i in range(len(x))])
else:
_left_gap = 0.2
_spacing_factor = 0.105 # of the bin width
_breaks = np.append([0], width)
left = np.cumsum(_breaks[:-1])
_sep = width[0] * _spacing_factor
left = left + _left_gap + [_sep * i for i in range(len(left))]
ax.bar(left, heights, width, bottom=self.bottom, **pinfo)
ax.autoscale()
if categorical:
ax.set_xticks(left+width/2)
ax.set_xticklabels(x)
# Update bottom positions
self.bottom = heights + self.bottom
|
bsd-2-clause
|
sergio2pi/NeuroDB
|
test/test5.py
|
1
|
2324
|
'''
Created on Oct 21, 2014
@author: sergio
'''
import numpy as np
import ctypes
import numpy.ctypeslib as npct
import matplotlib.pyplot as plt
#cfsfd = ctypes.cdll.LoadLibrary('/home/sergio/iibm/sandbox/t.so')
#cfsfd.get_dc.restype = ctypes.c_float
#dc = cfsfd.get_dc("dbname=demo host=192.168.2.2 user=postgres password=postgres", "54")
#print dc
array_1d_double = npct.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS')
array_1d_int = npct.ndpointer(dtype=np.int64, ndim=1, flags='CONTIGUOUS')
libcd = npct.load_library("cfsfdp", "/home/sergio/iibm/workspace/NeuroDB/NeuroDB/cfunctions/cfsfdp")
libcd.get_local_density.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_float, array_1d_double, ctypes.c_char_p]
libcd.get_local_density.restype = ctypes.c_int
libcd.get_distance_to_higher_density.argtypes = [ctypes.c_char_p, ctypes.c_char_p, array_1d_double, array_1d_double, ctypes.c_int]
libcd.get_distance_to_higher_density.restype = ctypes.c_int
libcd.get_dc.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_float]
libcd.get_dc.restype = ctypes.c_float
libcd.get_cluster_dp.argtypes = [ctypes.c_char_p, ctypes.c_char_p, array_1d_double]
libcd.get_cluster_dp.restype = array_1d_double
dc = libcd.get_dc("dbname=demo host=192.168.2.2 user=postgres password=postgres", "54", 2.0)
local_density = np.empty(1026)
distance_to_higher_density = np.empty(1026)
print "dc: ", dc, type(dc)
libcd.get_local_density("dbname=demo host=192.168.2.2 user=postgres password=postgres", "54", dc, local_density, "gaussian")
libcd.get_distance_to_higher_density("dbname=demo host=192.168.2.2 user=postgres password=postgres", "54", local_density, distance_to_higher_density, len(local_density))
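# Density-peaks (CFSFDP) diagnostic: gamma = rho * delta, i.e. local density
# times distance to the nearest point of higher density; points where both are
# large are candidate cluster centres.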
gamma = local_density*distance_to_higher_density
dp = distance_to_higher_density[local_density.argsort()]
dp2 = np.empty(1026)
for i in range(len(dp)):
dp2[i] = i * dp[i]
#gamma2 = libcd.get_cluster_dp("dbname=demo host=192.168.2.2 user=postgres password=postgres", "54")
# plt.subplot(4,1,1)
# plt.plot(local_density, 'o')
# plt.subplot(4,1,2)
# plt.plot(distance_to_higher_density, 'o')
plt.subplot(2,1,1)
plt.plot(local_density, distance_to_higher_density, 'o')
plt.subplot(2,1,2)
plt.plot(dp2, 'o')
plt.show()
#print dc("dbname=demo user=postgres password=postgres hostaddr=192.168.2.2 port=5432")
pass
|
gpl-2.0
|
GitYiheng/reinforcement_learning_test
|
test03_monte_carlo/cartpole_mc_ann.py
|
1
|
4964
|
import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
import gym_morph # customized environment for cart-pole
import matplotlib.pyplot as plt
import time
test_num = 2
# Hyperparameters
RANDOM_NUMBER_SEED = test_num
ENVIRONMENT1 = "morph-v0"
MAX_EPISODES = 2000 # number of episodes
EPISODE_LENGTH = 200 # single episode length
HIDDEN_SIZE = 6
DISPLAY_WEIGHTS = False # Help debug weight update
gamma = 0.99 # Discount per step
alpha = 0.1 # Learning rate
RENDER = False # Render the cart-pole system
VIDEO_INTERVAL = 100 # Generate a video at this interval
CONSECUTIVE_TARGET = 100 # Including previous 100 rewards
DIR_PATH_SAVEFIG = "/home/yh/cartpole_mc_ann/"
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_clr" + str(alpha).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
env = gym.make(ENVIRONMENT1)
env.seed(RANDOM_NUMBER_SEED)
np.random.seed(RANDOM_NUMBER_SEED)
tf.set_random_seed(RANDOM_NUMBER_SEED)
# Input and output sizes
input_size = env.observation_space.shape[0]
try:
output_size = env.action_space.shape[0]
except AttributeError:
output_size = env.action_space.n
# Tensorflow network setup
x = tf.placeholder(tf.float32, shape=(None, input_size))
y = tf.placeholder(tf.float32, shape=(None, 1))
expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
# Xavier (2010) weights initializer for uniform distribution:
# x = sqrt(6. / (in + out)); [-x, x]
w_init = tf.contrib.layers.xavier_initializer()
hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
initializer=w_init)
hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
dist_soft = tf.nn.log_softmax(dist)
dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
pi = tf.contrib.distributions.Bernoulli(dist_in)
pi_sample = pi.sample()
log_pi = pi.log_prob(y)
optimizer = tf.train.RMSPropOptimizer(alpha)
# global_step = tf.Variable()
# optimizer = tf.train.RMSPropOptimizer(alpha)
train = optimizer.minimize(-1.0 * expected_returns * log_pi)
# saver = tf.train.Saver()
# Create and initialize a session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def run_episode(environment, ep, render=False):
raw_reward = 0
discounted_reward = 0
cumulative_reward = []
discount = 1.0
states = []
actions = []
obs = environment.reset()
done = False
while not done:
states.append(obs)
cumulative_reward.append(discounted_reward)
if render and ((ep % VIDEO_INTERVAL) == 0):
environment.render()
action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
actions.append(action)
obs, reward, done, info = env.step(action[0])
raw_reward += reward
if reward > 0:
discounted_reward += reward * discount
else:
discounted_reward += reward
discount *= gamma
return raw_reward, discounted_reward, cumulative_reward, states, actions
def display_weights(session):
w1 = session.run(hidden_W)
b1 = session.run(hidden_B)
w2 = session.run(dist_W)
b2 = session.run(dist_B)
print(w1, b1, w2, b2)
returns = []
mean_returns = []
for ep in range(MAX_EPISODES):
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, ep, RENDER)
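    # Monte Carlo policy-gradient update: the weight for each step's action is
    # the episode's total discounted return minus the discounted reward already
    # accumulated before that step (a simple return-to-go style baseline).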
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
returns.append(raw_G)
running_returns = returns[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
mean_return = np.mean(running_returns)
mean_returns.append(mean_return)
msg = "Episode: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(ep+1, raw_G, CONSECUTIVE_TARGET, mean_return)
print(msg)
env.close()
# Plot
# plt.style.use('ggplot')
plt.style.use('dark_background')
episodes_plot = np.arange(MAX_EPISODES)
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.85)
ax.set_title("The Cart-Pole Problem Test %i \n \
Episode Length: %i \
Discount Factor: %.2f \n \
Number of Hidden Neuron: %i \
Constant Learning Rate: %.5f" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, alpha))
ax.set_xlabel("Episode")
ax.set_ylabel("Return")
ax.set_ylim((0, 200))
ax.grid(linestyle='--')
# ax.set_aspect(0.4)
ax.plot(episodes_plot, returns, label='Instant return')
ax.plot(episodes_plot, mean_returns, label='Averaged return')
legend = ax.legend(loc='best', shadow=True)
fig.savefig(DIR_PATH_SAVEFIG + file_name_savefig, dpi=500)
# plt.show()
|
mit
|
leojohnthomas/ahkab
|
ahkab.py
|
1
|
14908
|
#!/usr/bin/env python2.7-32
# -*- coding: iso-8859-1 -*-
# ahkab.py
# The frontend of the simulator
# Copyright 2006 Giuseppe Venturini
# This file is part of the ahkab simulator.
#
# Ahkab is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# Ahkab is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License v2
# along with ahkab. If not, see <http://www.gnu.org/licenses/>.
""" ahkab is a easy electronic circuit simulator.
"""
import sys
import numpy
import sympy
import matplotlib
from optparse import OptionParser
# analyses
import dc_analysis
import transient
import ac
import shooting
import bfpss
import symbolic
import netlist_parser
import options
import constants
import utilities
import plotting
import printing
__version__ = "0.06"
def process_analysis(an_list, circ, outfile, verbose, cli_tran_method=None, guess=True, disable_step_control=False):
""" Processes an analysis vector:
    an_list: the list of analyses to be performed, as returned by netlist_parser
circ: the circuit instance, returned by netlist_parser
outfile: a filename. Results will be written to it. If set to stdout, prints to stdout
verbose: verbosity level
cli_tran_method: force the specified method in each tran analysis (see transient.py)
guess: use the builtin method get_dc_guess to guess x0
Returns: None
"""
x0_op = None
x0_ic_dict = {}
results = {}
for directive in [ x for x in an_list if x["type"] == "ic" ]:
x0_ic_dict.update({
directive["name"]:\
dc_analysis.build_x0_from_user_supplied_ic(circ, voltages_dict=directive["vdict"], currents_dict=directive["cdict"])
})
for an in an_list:
if outfile != 'stdout':
data_filename = outfile + "." + an["type"]
else:
data_filename = outfile
if an["type"] == "ic":
continue
if an["type"] == "op":
if an["guess_label"] is None:
x0_op = dc_analysis.op_analysis(circ, guess=guess, data_filename=data_filename, verbose=verbose)
else:
if not an["guess_label"] in x0_ic_dict:
printing.print_warning("op: guess is set but no matching .ic directive was found.")
printing.print_warning("op: using built-in guess method: "+str(guess))
x0_op = dc_analysis.op_analysis(circ, guess=guess, verbose=verbose)
else:
x0_op = dc_analysis.op_analysis(circ, guess=False, x0=x0_ic_dict[an["guess_label"]], verbose=verbose)
sol = x0_op
elif an["type"] == "dc":
if an["source_name"][0].lower() == "v":
elem_type = "vsource"
elif an["source_name"][0].lower() == "i":
elem_type = "isource"
else:
printing.print_general_error("Type of sweep source is unknown: " + an[1][0])
sys.exit(1)
sol = dc_analysis.dc_analysis(
circ, start=an["start"], stop=an["stop"], step=an["step"], \
type_descr=(elem_type, an["source_name"][1:]),
xguess=x0_op, data_filename=data_filename, guess=guess,
stype=an['stype'], verbose=verbose)
#{"type":"tran", "tstart":tstart, "tstop":tstop, "tstep":tstep, "uic":uic, "method":method, "ic_label":ic_label}
elif an["type"] == "tran":
if cli_tran_method is not None:
tran_method = cli_tran_method.upper()
elif an["method"] is not None:
tran_method = an["method"].upper()
else:
tran_method = options.default_tran_method
# set up the initial condition (t=0) according to uic
# uic = 0 -> all node voltages and currents are zero
# uic = 1 -> node voltages and currents are those computed in the last OP analysis
# uic = 2 -> node voltages and currents are those computed in the last OP analysis
# combined with the ic=XX directive found in capacitors and inductors
# uic = 3 -> use a .ic directive defined by the user
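# Example (illustrative): if the netlist defines an .ic directive named
# "ic1" and the tran analysis requests uic=3 with ic_label="ic1", then
# x0 below is taken from x0_ic_dict["ic1"], which was built from that
# directive in the loop at the top of this function.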
uic = an["uic"]
if uic == 0:
x0 = None
elif uic == 1:
if x0_op is None:
printing.print_general_error("uic is set to 1, but no op has been calculated yet.")
sys.exit(51)
x0 = x0_op
elif uic == 2:
if x0_op is None:
printing.print_general_error("uic is set to 2, but no op has been calculated yet.")
sys.exit(51)
x0 = dc_analysis.modify_x0_for_ic(circ, x0_op)
elif uic == 3:
if an["ic_label"] is None:
printing.print_general_error("uic is set to 3, but param ic=<ic_label> was not defined.")
sys.exit(53)
elif not an["ic_label"] in x0_ic_dict:
printing.print_general_error("uic is set to 3, but no .ic directive named %s was found." \
%(str(an["ic_label"]),))
sys.exit(54)
x0 = x0_ic_dict[an["ic_label"]]
sol = transient.transient_analysis(circ, \
tstart=an["tstart"], tstep=an["tstep"], tstop=an["tstop"], \
x0=x0, mna=None, N=None, verbose=verbose, data_filename=data_filename, \
use_step_control=(not disable_step_control), method=tran_method)
elif an["type"] == "shooting":
if an["method"]=="brute-force":
sol = bfpss.bfpss(circ, period=an["period"], step=an["step"], mna=None, Tf=None, \
D=None, points=an["points"], autonomous=an["autonomous"], x0=x0_op, \
data_filename=data_filename, verbose=verbose)
elif an["method"]=="shooting":
sol = shooting.shooting(circ, period=an["period"], step=an["step"], mna=None, \
Tf=None, D=None, points=an["points"], autonomous=an["autonomous"], \
data_filename=data_filename, verbose=verbose)
elif an["type"] == "symbolic":
if not 'subs' in an.keys():
an.update({'subs':None})
sol = symbolic.solve(circ, an['source'], opts={'ac':an['ac']}, subs=an['subs'], verbose=verbose)
elif an["type"] == "ac":
sol = ac.ac_analysis(circ=circ, start=an['start'], nsteps=an['nsteps'], \
stop=an['stop'], step_type='LOG', xop=x0_op, mna=None,\
data_filename=data_filename, verbose=verbose)
elif an["type"] == "temp":
constants.T = utilities.Celsius2Kelvin(an['temp'])
results.update({an["type"]:sol})
return results
def process_postproc(postproc_list, title, results, outfilename, remote=False):
"""Runs the post-processing operations, such as plotting.
postproc_list: list of post processing operations as returned by main()
title: the deck title
results: the results to be plotted (including the ones that are not needed)
outfilename: if the plots are saved to disk, this is the filename without extension
remote: boolean, do not show plots if True (such as ssh without X11 forwarding)
Returns: None
"""
index = 0
if outfilename == 'stdout':
printing.print_warning("Plotting and printing the results to stdout are incompatible options. Plotting skipped.")
return
for postproc in postproc_list:
#print postproc["analysis"], results.keys(), results.has_key(postproc["analysis"]), results[postproc["analysis"]] is None #DEBUG
plotting.plot_results(title, postproc["x"], postproc["l2l1"], results[postproc["analysis"]], "%s-%d.%s" % (outfilename, index, options.plotting_outtype))
index = index +1
if len(postproc_list) and not remote:
plotting.show_plots()
return None
def main(filename, outfile="stdout", tran_method=transient.TRAP.lower(), no_step_control=False, dc_guess='guess', print_circuit=False, remote=True, verbose=3):
"""This method allows calling ahkab from a Python script.
"""
printing.print_info_line(("This is ahkab %s running with:" %(__version__),6), verbose)
printing.print_info_line((" Python %s" % (sys.version.split('\n')[0],),6), verbose)
printing.print_info_line((" Numpy %s" % (numpy.__version__),6), verbose)
printing.print_info_line((" Sympy %s" % (sympy.__version__),6), verbose)
printing.print_info_line((" Matplotlib %s" % (matplotlib.__version__),6), verbose)
utilities._set_execution_lock()
read_netlist_from_stdin = (filename is None or filename == "-")
(circ, directives, postproc_direct) = netlist_parser.parse_circuit(filename, read_netlist_from_stdin)
check, reason = dc_analysis.check_circuit(circ)
if not check:
printing.print_general_error(reason)
printing.print_circuit(circ)
sys.exit(3)
if verbose > 3 or print_circuit:
print "Parsed circuit:"
printing.print_circuit(circ)
elif verbose > 1:
print circ.title.upper()
an_list = netlist_parser.parse_analysis(circ, directives)
postproc_list = netlist_parser.parse_postproc(circ, an_list, postproc_direct)
if len(an_list) > 0:
printing.print_info_line(("Requested an.:", 4), verbose)
if verbose >= 4:
map(printing.print_analysis, an_list)
else:
if verbose:
printing.print_warning("No analysis requested.")
if len(an_list) > 0:
results = process_analysis(an_list, circ, outfile, verbose, guess=dc_guess.lower()=="guess", \
cli_tran_method=tran_method, disable_step_control=no_step_control)
else:
printing.print_warning("Nothing to do. Quitting.")
if len(an_list) > 0 and len(postproc_list) > 0 and len(results):
process_postproc(postproc_list, circ.title, results, outfile, remote)
utilities._unset_execution_lock()
return results
if __name__ == "__main__":
parser = OptionParser(usage="usage: \t%prog [options] <filename>\n\nThe filename is the netlist to be opened. Use - (a dash) to read from stdin.", version="%prog "+__version__+u" (c) 2006-2013 Giuseppe Venturini")
#general options
parser.add_option("-v", "--verbose", action="store", type="string", dest="verbose", default="3", help="Verbose level: from 0 (almost silent) to 5 (debug)")
parser.add_option("-p", "--print", action="store_true", dest="print_circuit", default=False, help="Print the parsed circuit")
parser.add_option("-o", "--outfile", action="store", type="string", dest="outfile", default="stdout", help="Data output file. Defaults to stdout.")
parser.add_option("", "--dc-guess", action="store", type="string", dest="dc_guess", default="guess", help="Guess to be used to start a op or dc analysis: none or guess. Defaults to guess.")
parser.add_option("-t", "--tran-method", action="store", type="string", dest="method", default=transient.TRAP.lower(), help="Method to be used in transient analysis: " +transient.IMPLICIT_EULER.lower()+", "+transient.TRAP.lower()+", "+transient.GEAR2.lower()+", "+transient.GEAR3.lower()+", "+transient.GEAR4.lower()+", "+transient.GEAR5.lower()+" or "+transient.GEAR6.lower()+". Defaults to TRAP.")
parser.add_option("", "--t-fixed-step", action="store_true", dest="no_step_control", default=False, help="Disables the step control in transient analysis. Useful if you want to perform a FFT on the results.")
parser.add_option("", "--v-absolute-tolerance", action="store", type="string", dest="vea", default=None, help="Voltage absolute tolerance. Default: "+str(options.vea)+" V")
parser.add_option("", "--v-relative-tolerance", action="store", type="string", dest="ver", default=None, help="Voltage relative tolerance. Default: "+str(options.ver))
parser.add_option("", "--i-absolute-tolerance", action="store", type="string", dest="iea", default=None, help="Current absolute tolerance. Default: "+str(options.iea)+" A")
parser.add_option("", "--i-relative-tolerance", action="store", type="string", dest="ier", default=None, help="Current relative tolerance. Default: "+str(options.ier))
parser.add_option("", "--h-min", action="store", type="string", dest="hmin", default=None, help="Minimum time step. Default: "+str(options.hmin))
parser.add_option("", "--dc-max-nr", action="store", type="string", dest="dc_max_nr_iter", default=None, help="Maximum nr of NR iterations for dc analysis. Default: "+str(options.dc_max_nr_iter))
parser.add_option("", "--t-max-nr", action="store", type="string", dest="transient_max_nr_iter", default=None, help="Maximum nr of NR iterations for each time step during transient analysis. Default: "+str(options.transient_max_nr_iter))
parser.add_option("", "--t-max-time", action="store", type="string", dest="transient_max_time_iter", default=None, help="Maximum nr of time iterations during transient analysis. Setting it to 0 (zero) disables the limit. Default: "+str(options.transient_max_time_iter))
parser.add_option("", "--s-max-nr", action="store", type="string", dest="shooting_max_nr_iter", default=None, help="Maximum nr of NR iterations during shooting analysis. Setting it to 0 (zero) disables the limit. Default: "+str(options.shooting_max_nr_iter))
parser.add_option("", "--gmin", action="store", type="string", dest="gmin", default=None, help="The minimum conductance to ground. Inserted when requested. Default: "+str(options.gmin))
parser.add_option("", "--cmin", action="store", type="string", dest="cmin", default=None, help="The minimum capacitance to ground. Default: "+str(options.cmin))
parser.add_option("", "--eps", action="store_true", dest="eps", default=False, help="Calculate the machine precision. The machine precision defaults to "+str(utilities.EPS))
(cli_options, remaning_args) = parser.parse_args()
verbose = int(cli_options.verbose)
if cli_options.method is not None:
method = cli_options.method.upper()
if cli_options.vea is not None:
options.vea = float(cli_options.vea)
if cli_options.ver is not None:
options.ver = float(cli_options.ver)
if cli_options.iea is not None:
options.iea = float(cli_options.iea)
if cli_options.ier is not None:
options.ier = float(cli_options.ier)
if cli_options.hmin is not None:
options.hmin = float(cli_options.hmin)
if cli_options.dc_max_nr_iter is not None:
options.dc_max_nr_iter = int(float(cli_options.dc_max_nr_iter))
if cli_options.transient_max_nr_iter is not None:
options.transient_max_nr_iter = int(float(cli_options.transient_max_nr_iter))
if cli_options.transient_max_time_iter is not None:
options.transient_max_time_iter = int(float(cli_options.transient_max_time_iter))
if cli_options.shooting_max_nr_iter is not None:
options.shooting_max_nr_iter = int(float(cli_options.shooting_max_nr_iter))
if cli_options.gmin is not None:
options.gmin = float(cli_options.gmin)
if cli_options.cmin is not None:
options.cmin = float(cli_options.cmin)
if cli_options.eps:
utilities.EPS = utilities.calc_eps()
print "Detected machine precision: " + str(utilities.EPS)
if not len(remaning_args) == 1:
print "Usage: ./ahkab.py [options] <filename>\n./ahkab.py -h for help"
sys.exit(1)
if remaning_args[0] == '-':
read_netlist_from_stdin = True
else:
read_netlist_from_stdin = False
if not read_netlist_from_stdin and not utilities.check_file(remaning_args[0]):
sys.exit(23)
# Program execution
main(filename=remaning_args[0], outfile=cli_options.outfile, tran_method=cli_options.method, \
no_step_control=cli_options.no_step_control, dc_guess=cli_options.dc_guess, \
print_circuit=cli_options.print_circuit, remote=False, verbose=verbose)
sys.exit(0)
|
gpl-2.0
|
puotila/PORA-IP
|
HiroshisArcticTS.py
|
1
|
2451
|
#!/usr/bin/env python
"""
Read Hiroshi's (AWI) Arctic T, S climatology,
based on recent cruises. It essentially replaces PHC3.
As the climatology is sparse, missing values are filled
with WOA13 values (which may be nonsense).
"""
import sys
sys.path.append('/home/uotilap/tiede/ORA-IP/annual_mean')
import numpy as np
import netCDF4 as nc
from plotAnnuaMeanProfile import LevelBounds
class Hiroshis(object):
def __init__(self,fn='./ts-clim/hiroshis-clim/archive_v12_QC2_3_DPL_checked_2d_season_int-remapbil-oraip.nc'):
self.dset, self.syr, self.eyr = 'Sumata', 1980, 2015
fp = nc.Dataset(fn)
self.olon = np.array(fp.variables['lon'][:])
# transfer negative lons to positive
self.olon[np.where(self.olon<0.)] += 360.
self.olat = np.array(fp.variables['lat'][:])
self.odepth = np.array(fp.variables['depth'][:])
self.temperature = np.ma.array(fp.variables['temperature'][:])
self.salinity = np.ma.array(fp.variables['salinity'][:])
fp.close()
def getPoint(self,plon,plat,vname):
""" get the closest point of (plon,plat)
"""
self.vname, self.lon, self.lat = vname, plon, plat
iy = np.where(np.abs(plat-self.olat)==np.min(np.abs(plat-self.olat)))[0][0]
ix = np.where(np.abs(plon-self.olon)==np.min(np.abs(plon-self.olon)))[0][0]
print "Looking for (%f,%f), closest at (%f,%f)" % \
(plon,plat,self.olon[ix],self.olat[iy])
depth = self.odepth
if vname=='S':
data, level_bounds = self.salinity, LevelBounds['S']
else:
data, level_bounds = self.temperature, LevelBounds['T']
tavg_data = np.ma.average(data,axis=0)
# vertical layer averaging
self.data = []
for lb in level_bounds:
iz = np.where((depth>=lb[0])&(depth<lb[1]))
self.data.append(tavg_data[iz].mean())
print "Averaged layer %d-%d" % (lb[0],lb[1])
self.depth = np.hstack((level_bounds[:,0],4000))
print "depth:", self.depth
if __name__ == "__main__":
lon, lat = 98., 83.
hrs = Hiroshis()
hrs.getPoint(lon,lat,'T')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5,10))
ax = fig.add_subplot(1,1,1)
ax.plot(np.ma.hstack((hrs.data,hrs.data[-1])),\
-hrs.depth,\
lw=2,drawstyle='steps-pre')
ax.set_ylabel('depth [m]')
plt.show()
print "Finnished!"
|
gpl-3.0
|
chrhartm/SORN
|
chartmann/plot_cluster.py
|
2
|
72584
|
from __future__ import division
from pylab import *
from scipy.optimize import curve_fit
from scipy import stats
import tables
import matplotlib.cm as mplcm
import matplotlib.colors as colors
import sys
sys.path.insert(0,"..")
import os
import utils
utils.backup(__file__)
import cPickle as pickle
import gzip
from common.sources import TrialSource
from utils.plotting import pretty_mpl_defaults
matlab_comparison = True # use MATLAB (via mlabwrap) for the Fano factor comparison, if available
# Figure type (eps,pdf,png,...)
ftype = 'pdf'
# Data to plot
path = r'/home/chartmann/Desktop/Meeting Plots/2015-12-10_weightdepfailure_alignment/cluster_long_06_01_2015-12-09_17-06-48/common'
datafile = 'result.h5'
'''
Label a statistically significant difference (two-sample t-test, p <= 0.05)
between two bars of a bar plot with a bracket and an asterisk.
Adapted from http://stackoverflow.com/questions/11517986/indicating-the-statistically-significant-difference-in-bar-graph
Parameters:
x1: x-value in the plot where the bracket starts
x2: x-value in the plot where the bracket ends
Y1: vector of data points corresponding to x1
Y2: vector of data points corresponding to x2
ax: the axes to draw on
'''
def label_diff(x1,x2,Y1,Y2,ax):
# Testing
assert(len(Y1)==len(Y2))
(t,pval) = stats.ttest_ind(Y1,Y2)
if pval>0.05:
return
# If significant, draw bar
N = len(Y1)
x = mean([x1,x2])
# Estimate how high to draw
y = max(mean(Y1)+1.*std(Y1)/sqrt(N),mean(Y2)+1.*std(Y2)/sqrt(N))
# Draw
props = {'connectionstyle':'bar,fraction=0.15','arrowstyle':'-',
'lw':2,'color':'k'}
ax.annotate('*', xy=(x,1.05*y), zorder=10, ha='center')
ax.annotate('', xy=(x1,y), xytext=(x2,y), arrowprops=props)
# Extend figure height if bar out of range
ylimit = ax.get_ylim()
maxy = 1.1*y
if ylimit[1] < maxy:
ax.set_ylim((ylimit[0],maxy))
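# Usage sketch (illustrative): after plotting two bars at x=1 and x=2
# from sample vectors groupA and groupB, calling
#     label_diff(1, 2, groupA, groupB, gca())
# draws a significance bracket with an asterisk if the t-test gives
# p <= 0.05, and does nothing otherwise.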
def errorspan(x,y,yerr,**kwargs):
# , gets first item in list
line, = plot(x,y,**kwargs)
fill_between(x,y-yerr,y+yerr,alpha=0.5,facecolor=line.get_color())
# This contains LOTS of code duplication with plot_single...
def plot_results(result_path,result):
pretty_mpl_defaults()
final_path = os.path.join(result_path,result)
print final_path
h5 = tables.openFile(final_path,'r')
data = h5.root
if os.path.isdir(data.c.logfilepath[0]):
pickle_dir = data.c.logfilepath[0]
else:
pickle_dir = result_path
plots_path = os.path.join('..','plots')
if not os.path.exists(plots_path):
os.mkdir(plots_path)
os.chdir(plots_path)
# This ONLY works for the cluster when N_params == N_cores
#N = shape(data.paramtracker[0])[0]
#params = np.array([x/(10.0)+0.1 for x in range(10)])[:,None]
params = data.paramtracker[0]
N_params = shape(data.paramtracker)[1]
N_iterations = shape(data.paramtracker)[0]
param_name = data.c.cluster.vary_param[0]
param_name_plot = param_name
if param_name == 'source.prob':
filename = os.path.join(pickle_dir,
"source_plastic_%s_%.3f.pickle"
%(param_name,params[0]))
source_plastic = pickle.load(gzip.open(filename,"r"))
if isinstance(source_plastic,TrialSource):
source_plastic = source_plastic.source
words = source_plastic.words
param_name_plot = 'prior(%s)'%words[0]
elif param_name == 'W_ee.p_failure':
param_name_plot = 'Failure probability'
elif param_name == 'W_ee.bias':
param_name_plot = 'Pot. bias'
param_name_u = param_name.replace(' ','_')
print 'Iterations:', N_iterations
### Plot Activity Stats
if data.__contains__('activity') and False:
N = N_params
activities = np.zeros(N)
lookback = 3000
for i in range(N):
for j in range(np.shape(data.activity)[0]):
activities[i] += sum(data.activity[j][i][-lookback:])\
/(lookback*1.0)
activities[i] /= 1.0*np.shape(data.activity)[0]
figure()
plot(params,activities,'o')
title('Average activity vs. %s (%s)'
%(data.c.cluster.vary_param[0],
data.c.stats.file_suffix[0]))
xlabel('%s'%(data.c.cluster.vary_param[0]))
ylabel('Activity')
utils.saveplot('Activity_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
if data.__contains__('meanactivity'):
test_words = data.c.source.test_words[0]
baseline = data.meanactivity[:,:,0]
act = {}
act_2nd = {}
start = 1
for word in test_words:
length = len(word)
act[word] = mean(data.meanactivity[:,:,start:start+length],
2)
act_2nd[word] = data.meanactivity[:,:,start+1]
start += length
# Colors from figures from paper
c_gray = '#929496'
c_blue = '#33348e'
c_red = '#cc2229'
c_green= '#33a457'
# Colors from figures from paper
ekw = dict(elinewidth=5,ecolor='k')#,capsize=0)
col = {'ABCD':c_blue,'DCBA':c_red,'A_CD':c_red,'E_CD':c_green}
if data.c.source.control:
condition = 'Control'
else:
condition = 'Experimental'
if data.c.cluster.vary_param[0] == 'source.control' \
and 'DCBA' in test_words:
figure()
bar(1,mean(baseline,0)[0],
yerr=std(baseline,0)[0]/sqrt(N_iterations),color=c_gray,
error_kw=ekw,label='Baseline',align='center')
bar(2,mean(act['ABCD'],0)[0],
yerr=std(act['ABCD'],0)[0]/sqrt(N_iterations),
color=c_blue,error_kw=ekw,label='ABCD',align='center')
bar(3,mean(act['DCBA'],0)[0],
yerr=std(act['DCBA'],0)[0]/sqrt(N_iterations),
color=c_red,error_kw=ekw,label='DCBA',align='center')
bar(5,mean(baseline,0)[1],
yerr=std(baseline,0)[1]/sqrt(N_iterations),color=c_gray,
error_kw=ekw,align='center')
bar(6,mean(act['ABCD'],0)[1],
yerr=std(act['ABCD'],0)[1]/sqrt(N_iterations),
color=c_blue,error_kw=ekw,align='center')
bar(7,mean(act['DCBA'],0)[1],
yerr=std(act['DCBA'],0)[1]/sqrt(N_iterations),
color=c_red,error_kw=ekw,align='center')
tick_params(axis='x',which='both',bottom='off',top='off')
# Test significances
label_diff(1,2,baseline[:,0],act['ABCD'][:,0],gca())
label_diff(2,3,act['ABCD'][:,0],act['DCBA'][:,0],gca())
label_diff(5,6,baseline[:,1],act['ABCD'][:,1],gca())
label_diff(6,7,act['ABCD'][:,1],act['DCBA'][:,1],gca())
xlim([0,8])
xticks([2,6],['Experiment','Control'])
ylabel('Sequence magnitude')
legend(loc='lower left')
utils.saveplot('Mean_reverse_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
figure()
errorbar(params,mean(act['ABCD'],0),yerr=std(act['ABCD'],0)
/sqrt(N_iterations),c='k')
xlabel(param_name_plot)
ylabel('Magnitude')
pdiff = (params[-1]-params[0])/10.
xlim([params[0]-pdiff,params[-1]+pdiff])
utils.saveplot('Mean_vs_%s_%s.%s'
%(param_name_u,data.c.stats.file_suffix[0],ftype))
for (p,param) in enumerate(params):
figure()
start = 1
for word in test_words:
length = len(word)
x = arange(1,length+1)
errorbar(x,mean(data.meanactivity[:,p,start:start
+length],0), yerr=std(data.meanactivity[:,p,
start:start+length],0)/sqrt(N_iterations),
c=col[word],label=word)
start += length
xlabel('Letter')
ylabel('Magnitude')
legend(loc='best')
xlim([0,length+1])
title(param_name_plot+' = %.2f'%param)
utils.saveplot('Mean_time_%s_%s_%.2f.%s'%\
(data.c.stats.file_suffix[0],param_name_u,param,ftype))
figure()
bar(1,mean(baseline,0)[p],
yerr=std(baseline,0)[p]/sqrt(N_iterations),color=c_gray,
error_kw=ekw,label='Baseline',align='center')
for (i,word) in enumerate(test_words):
bar(i+2,mean(act[word],0)[p],
yerr=std(act[word],0)[p]/sqrt(N_iterations),
color=col[word],error_kw=ekw,label=word,
align='center')
tick_params(axis='x',which='both',bottom='off',top='off',
labelbottom='off')
xlim([0.5,i+2.5])
xlabel(param_name_plot+' = %.2f'%param)
ylabel('Sequence magnitude')
legend(loc='upper left')
title(param_name_plot+' = %.2f'%param)
utils.saveplot('Mean_reverse_%s_%s_%.2f.%s'%\
(data.c.stats.file_suffix[0],param_name_u,param,ftype))
figure()
for (i,word) in enumerate(test_words):
bar(i+1,mean(act[word],0)[p],
yerr=std(act[word],0)[p]/sqrt(N_iterations),
color=col[word],error_kw=ekw,align='center',
label=word)
# Test significance
for (j,word_cp) in enumerate(test_words[i+1:]):
label_diff(i+1,j+i+2,act[word][:,p],
act[word_cp][:,p],gca())
l = i+1
for (i,word) in enumerate(test_words):
bar(i+2+l,mean(act_2nd[word],0)[p],
yerr=std(act_2nd[word],0)[p]/sqrt(N_iterations),
color=col[word],error_kw=ekw,align='center')
# Test significance
for (j,word_cp) in enumerate(test_words[i+1:]):
label_diff(i+2+l,j+i+3+l,act_2nd[word][:,p],
act_2nd[word_cp][:,p],gca())
legend(loc='lower left')
tick_params(axis='x',which='both',bottom='off',top='off')
xticks([i//2+1,l+3],['Full sequence','Second element'])
xlim([0,2*(i+1)+2])
ylabel('Magnitude')
#~ title(param_name_plot+' = %.2f'%param)
utils.saveplot('Mean_2nd_%s_%s_%.2f.%s'%\
(data.c.stats.file_suffix[0],param_name_u,param,ftype))
if (data.__contains__('meanpattern')
and data.__contains__('meanactivity')):
test_words = data.c.source.test_words[0]
pats = {}
start = 1
for word in test_words:
length = len(word)
pats[word] = data.meanpattern[:,:,start:start+length]
start += length
if ('ABCD' in test_words and 'A_CD' in test_words and 'E_CD' in
test_words):
for (p,param) in enumerate(params):
figure()
dist_con = sum(abs(pats['E_CD'][:,p,1,None]
-pats['ABCD'][:,p,:]),2)
dist_exp = sum(abs(pats['A_CD'][:,p,1,None]
-pats['ABCD'][:,p,:]),2)
bar(1,mean(dist_con[:,1]),
yerr=std(dist_con[:,1])/sqrt(N_iterations),
color=col['E_CD'],error_kw=ekw,align='center')
bar(2,mean(dist_exp[:,1]),
yerr=std(dist_exp[:,1])/sqrt(N_iterations),
color=col['A_CD'],error_kw=ekw,align='center')
label_diff(1,2,dist_con[:,1],dist_exp[:,1],gca())
xticks([1,2],['E_CD','A_CD'])
y_lim = ylim()
ylim([0,y_lim[1]*1.1])
ylabel('Manhattan distance')
utils.saveplot('Mean_dist_%s_%s_%.2f.%s'%
(data.c.stats.file_suffix[0],param_name_u,param,
ftype))
### Plot endweight Stat
if False and data.__contains__('endweight'):
# First the logweight:
logweight = data.endweight[0][data.endweight[0]>0]
figure()
logbins = logspace(-2,0,10)
(y,_) = histogram(logweight,bins=logbins)
#fit data to lognormal
x = logbins[:-1]+(logbins[0]+logbins[1])/2.0
semilogx(x,y,'.')
# Do the fitting
def lognormal(x,mue,var,scale):
return scale * (exp(- ((log(x)-mue)*(log(x)-mue))
/ (2*var)) / (x*sqrt(2*pi*var)))
popt, pcov = curve_fit(lognormal, x, y)
curve_x = logspace(-2,0,100)
fitted_y = lognormal(curve_x,*popt)
plot(curve_x,fitted_y)
title('Final Weight Distribution (%s)'
%(data.c.stats.file_suffix[0]))
xlabel('Weight')
ylabel('Frequency')
legend(('data', 'lognormal fit (mue=%.3f var=%.3f scale=%.3f)'
%(popt[0], popt[1], popt[2])))
utils.saveplot('LogWeights_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
# Now scale-free property
tmp = data.endweight[0]>0.0+0.0
binary_connections = tmp+0.0
in_degree = sum(binary_connections,1)
out_degree = sum(binary_connections,0)
fig = figure()
fig.add_subplot(131)
hist(in_degree)
ylabel('frequency')
xlabel('in degree')
fig.add_subplot(132)
hist(out_degree)
xlabel('out degree')
fig.add_subplot(133)
hist(in_degree+out_degree)
xlabel('in+out degree')
plt.suptitle('Degree distributions')
utils.saveplot('Degree_Distributions_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
if False and (data.__contains__('Spikes') and data.__contains__('endweight')
and data.__contains__('Bayes')):
steps_plastic = data.c.steps_plastic[0]
steps_noplastic_train = data.c.steps_noplastic_train[0]
steps_noplastic_test = data.c.steps_noplastic_test[0]
# TODO Plot response probabilities of input units from plot_single
if data.__contains__('smallworld'):
figure()
N = N_params # N is otherwise only defined in a disabled block above
gamma = np.zeros(N)
lam = np.zeros(N)
S_W = np.zeros(N)
print data.smallworld
for (i,item) in enumerate(data.smallworld):
gamma += item.T[0][:N]
lam += item.T[1][:N]
S_W += item.T[2][:N]
gamma /= (1.0*shape(data.smallworld)[0])
lam /= (1.0*shape(data.smallworld)[0])
S_W /= (1.0*shape(data.smallworld)[0])
for i in range(N):
plot([1,2,3],[gamma[i],lam[i],S_W[i]],'o')
plot([0,4],[1,1],'--')
legend(params)
xticks([1,2,3],['gamma','lambda','S_W'])
title('Small-world-ness with respect to %s'
%data.c.cluster.vary_param[0])
utils.saveplot('small_world_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
### Plot ISIs
if False and data.__contains__('ISIs'):
figure()
x = np.array(range(0,50))
plot(x,data.ISIs[0][:], '.')
# Do the fitting
def exponential(x, a, b):
return a * np.exp(-b*x)
popt, pcov = curve_fit(exponential, x, data.ISIs[0][:])
fitted_y = exponential(x,*popt)
plot(x,fitted_y)
title('Interspike Intervals (%s)'%(data.c.stats.file_suffix[0]))
xlabel('ISI (Time Step)')
ylabel('Frequency')
legend(('data', 'exp fit (scale:%.3f exponent:%.3f)'
%(popt[0],-popt[1])))
utils.saveplot('ISIs_%s.%s'%(data.c.stats.file_suffix[0],ftype))
### Plot ConnectionFraction
if (data.__contains__('ConnectionFraction') and
data.c.stats.__contains__('only_last')):
N = N_params # N is otherwise only defined in a disabled block above
connections = np.zeros(N)
lookback = 3000
for i in range(N):
for j in range(np.shape(data.ConnectionFraction)[0]):
connections[i] += sum(data.ConnectionFraction[j][i]
[-lookback:])/(lookback*1.0)
connections[i] /= 1.0*np.shape(data.activity)[0]
figure()
plot(params,connections,'o')
title('Fraction of ex-ex connections for last 3000 steps (%s)'
%(data.c.stats.file_suffix[0]))
xlabel('%s'%data.c.cluster.vary_param[0])
ylabel('Connection fraction')
utils.saveplot('Connections_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
figure()
for i in range(N):
#TODO average over all
plot(data.ConnectionFraction[0][i])
legend(data.paramtracker[0])
xlabel('Steps')
ylabel('Connection fraction')
only_last = data.c.stats.only_last[0]
N_steps = data.c.N_steps[0]
stepsize = only_last//2
xticks([0,stepsize,2*stepsize,3*stepsize,4*stepsize],
[0,N_steps//2,'<--timelapse | last---------->',
N_steps-only_last//2,N_steps])
title('Connection fraction for %s = %.3f'
%(data.c.cluster.vary_param[0],params[i]))
utils.saveplot('Connections2_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
if (data.__contains__('ConnectionFraction') and not
data.c.stats.__contains__('only_last')):
figure()
N_points = 1000
spacing = data.c.steps_plastic[0]//N_points
x = linspace(0,data.c.steps_plastic[0],N_points)
for p in range(N_params):
fractions = data.ConnectionFraction[:,p,
:data.c.steps_plastic[0]:spacing]
errorspan(x,mean(fractions,0),yerr=std(fractions,0),
label=params[p][0])
xlim([x[0]-0.05*x[-1],x[-1]])
legend(loc='upper right',title=param_name_plot)
xlabel('Step')
ylabel('Fraction of E-E connections')
tight_layout()
utils.saveplot('Connections_%s.%s'%\
(data.c.stats.file_suffix[0],ftype))
### Plot effect of double_synapses
if (data.__contains__('W_ee_history') and
data.__contains__('W_ee_2_history')):
W_ee_hs = data.W_ee_history
W_ee2_hs = data.W_ee_2_history
from plot_single import parallel_stats
diffs = np.zeros((N_iterations,N_params,shape(W_ee_hs)[2]))
cvs = np.zeros((N_iterations,N_params,shape(W_ee_hs)[2]))
for (i) in range(N_params):
for j in range(N_iterations):
(diffs[j,i,:],cvs[j,i,:],_,_) = parallel_stats(
W_ee_hs[j,i],W_ee2_hs[j,i])
figure()
x = linspace(0,data.c.N_steps[0],shape(W_ee_hs)[2])
for (i,p) in enumerate(params):
errorspan(x,mean(cvs[:,i],0),std(cvs[:,i],0),
label=param_name_plot+" = %.2f"%p)
plot([x[0],x[-1]],[0.083,0.083],'--k',
label='CV from [Bartol et al.]')
ylabel('Median CV between weight pairs')
xlabel('Step')
xlim([x[0]-0.05*x[-1],x[-1]])
legend(loc='best')
tight_layout()
utils.saveplot('DoubleSynapses_CV_%s.%s'\
%(data.c.stats.file_suffix[0],ftype))
if data.__contains__('weefail'):
weefail = data.weefail
N_steps = data.c.N_steps[0]
x = arange(N_steps)
N_points = 1000
spacing = data.c.steps_plastic[0]//N_points
figure()
for (i,p) in enumerate(params):
errorspan(x[::spacing],mean(weefail[:,i,::spacing],0),
std(weefail[:,i,::spacing],0)/N_iterations,
label=param_name_plot+" = %.2f"%p)
xlabel('Step')
ylabel('Synaptic failure fraction')
xlim([x[0]-0.05*x[-1],x[-1]])
legend(loc='best')
tight_layout()
utils.saveplot('weefail_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
### Plot WeightLifetime
if False and data.__contains__('WeightLifetime') and \
any(data.WeightLifetime[0][:] > 0):
figure()
logbins = logspace(2,4,20)
(y,_) = histogram(data.WeightLifetime[0]
[data.WeightLifetime[0]>0],bins=logbins)
x = logbins[:-1]+(logbins[0]+logbins[1])/2.0
loglog(x,y,'.')
def powerlaw(x,a,k):
return a*x**k
popt, pcov = curve_fit(powerlaw, x, y)
fitted_y = powerlaw(x,*popt)
plot(x,fitted_y)
title('Weight Lifetime (%s)'%(data.c.stats.file_suffix[0]))
xlabel('Lifetime (Steps)')
ylabel('Frequency')
legend(('data','powerlaw-fit (a=%.3f k=%.3f)'
%(popt[0],popt[1])))
utils.saveplot('WeightLifetime_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
### Plot WeightChangeStat
if False and data.__contains__('WeightChange'):
# 0:weights, 1:abschange, 2:relchange
fig = figure()
fig.add_subplot(211)
plot(data.WeightChange[0][0][-(data.WeightChange[0][0]==0)],
data.WeightChange[0][1][-(data.WeightChange[0][0]==0)],'.')
ylabel('Absolute Change')
fig.add_subplot(212)
plot(data.WeightChange[0][0][-(data.WeightChange[0][0]==0)],
data.WeightChange[0][2][-(data.WeightChange[0][0]==0)],'.')
xlabel('Weight')
ylabel('Relative Change')
plt.suptitle('Change of Weights over %d Steps (%s)'
%(3000,data.c.stats.file_suffix[0]))
utils.saveplot('WeightChange_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
### Plot InputWeightStat
if data.__contains__('InputWeight'):
figure()
N_samples = shape(data.InputWeight)[4]
## Different colors
NUM_COLORS = N_params
cm = plt.get_cmap('gist_rainbow')
cNorm = colors.Normalize(vmin=0, vmax=NUM_COLORS-1)
scalarMap = mplcm.ScalarMappable(norm=cNorm, cmap=cm)
plt.gca().set_color_cycle([scalarMap.to_rgba(i) for i in
range(NUM_COLORS)])
sums_weights = np.zeros((N_params,N_iterations,N_samples))
for param in range(N_params):
for iteration in range(N_iterations):
sums_weights[param,iteration,:] = \
data.InputWeight[iteration,param].sum(0).sum(0)
sums_weights[param,iteration,:] /= \
sums_weights[param,iteration,0]
sums_weights[param,iteration,
sums_weights[param,iteration]==0] = 1
#average over iterations
plot((sums_weights.sum(1)/(1.0*N_iterations)).T)
xlabel('Step')
only_last = data.c.stats.only_last[0]
N_steps = data.c.N_steps[0]
stepsize = only_last//2
xticks([0,stepsize,2*stepsize,3*stepsize,4*stepsize],
[0,N_steps//2,'<--timelapse | last---------->',
N_steps-only_last//2,N_steps])
ylabel('Normalized sum of all input weights')
legend(data.paramtracker[0,:])
title('Input weight influence with param %s'
%data.c.cluster.vary_param[0])
utils.saveplot('InputWeight_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
### Plot SpontPatterns
if data.__contains__('SpontPattern'):
# gather or gatherv?
if shape(shape(data.SpontPattern[:]))[0] == 2:
gatherv = True
index_old = 0
else:
N_indices = shape(data.SpontPattern)[3]
gatherv = False
indexfreqs_mean_cumm = np.zeros((N_params,N_indices))
indexfreqs_std_cumm = np.zeros((N_params,N_indices))
patternfreqs_mean_cumm = np.zeros((N_params,N_indices))
patternfreqs_std_cumm = np.zeros((N_params,N_indices))
for param in range(N_params):
filename = os.path.join(pickle_dir,
"source_plastic_%s_%.3f.pickle"
%(param_name,params[param]))
source_plastic = pickle.load(gzip.open(filename,"r"))
if isinstance(source_plastic,TrialSource):
source_plastic = source_plastic.source
words = source_plastic.words
word_string = ''.join(words)
if gatherv:
index_new = \
where(data.SpontPattern[:]==-1)[1][2*(param+1)-1]
freqs = data.SpontPattern[:,index_old:index_new]
index_old = index_new
freqs = freqs[freqs>=0]
freqs = reshape(freqs,(N_iterations,2,-1))
N_indices = shape(freqs)[2]
else:
freqs = data.SpontPattern[:,param,:,:]
# Normalize to relative frequencies
freqs /= (1.*data.NormLast[0,param,0])
# First index frequencies
indexfreqs_mean = mean(freqs[:,0,:],0)
indexfreqs_std= std(freqs[:,0,:],0)/sqrt(N_iterations)
figure()
x = arange(N_indices)
bar(x,indexfreqs_mean,\
yerr=indexfreqs_std,\
align='center',label='Spontaneous freq.')
#,color=repeat(['b','r'],[4,4]))
#~ title('Spontaneous activity for %s=%.2f'
#~ %(param_name,params[param]))
# this assumes transition probabilities independent of
# the predecessor
word_probs = source_plastic.probs[0]
word_length = [len(x) for x in words]
norm_probs = word_probs/sum(map(lambda x,y:x*y,
word_probs,word_length))
lstart = 0
for (i,l) in enumerate(word_length):
p = norm_probs[i]
# default bar width is 0.8
plot([lstart-0.4,lstart+l-0.6],[p,p],'r--')
lstart += l
plot([],[],'r--',label='Presentation freq.')
xlim([-2,len(indexfreqs_mean)+1])
ax = gca()
ax.set_xticks(arange(len(word_string)))
ax.set_xticklabels(array([x for x in word_string]))
ylabel('Relative frequency')
xlabel('Letter')
tight_layout()
legend(loc='best')
utils.saveplot('SpontAct_%s_%s_%.2f.%s'\
%(data.c.stats.file_suffix[0],param_name_u,
params[param],ftype))
# Then pattern frequencies
# Normalize to relative occurances
for i in range(N_iterations):
freqs[i,1,:] /= sum(freqs[i,1,:])
patternfreqs_mean = mean(freqs[:,1,:],0)
patternfreqs_std = std(freqs[:,1,:],0)/sqrt(N_iterations)
figure()
N_patterns = len(words)*2
bar(arange(N_patterns),patternfreqs_mean[:N_patterns],\
yerr=patternfreqs_std[:N_patterns],align='center')
#~ title('Spontaneous patterns for %s=%.2f'
#~ %(param_name,params[param]))
xlim([-2,N_patterns+1])
ylim([0,1])
ax = gca()
ax.set_xticks(arange(N_patterns))
ax.set_xticklabels(words + [x[::-1] for x in words],
rotation=30,ha='right')
ylabel('Relative frequency')
xlabel('Pattern')
tight_layout()
utils.saveplot('SpontPat_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[param],ftype))
if not gatherv:
indexfreqs_mean_cumm[param,:] = indexfreqs_mean
indexfreqs_std_cumm[param,:] = indexfreqs_std
patternfreqs_mean_cumm[param,:] = patternfreqs_mean
patternfreqs_std_cumm[param,:] = patternfreqs_std
if not gatherv:
figure()
for index in range(N_indices):
errorbar(params,#+random(shape(params))*0.1*std(params),
indexfreqs_mean_cumm[:,index],
yerr=indexfreqs_std_cumm[:,index],
label=word_string[index])
hold('on')
legend(loc='center right')
#~ title('Letter frequencies')
xlabel(param_name_plot)
minmax = [min(params).copy(),max(params).copy()]
delta = (minmax[1]-minmax[0])*0.1
minmax[0] -= delta
minmax[1] += delta
xlim(minmax)
ylabel('Relative frequency')
tight_layout()
utils.saveplot('change_freqs_%s.%s'%(param_name_u,ftype))
figure()
allwords = words + [x[::-1] for x in words]
for index in range(N_patterns):
errorbar(params,#+random(shape(params))*0.1*std(params),
patternfreqs_mean_cumm[:,index],
yerr=patternfreqs_std_cumm[:,index],
label=allwords[index])
hold('on')
legend(loc='center right')
#~ title('Pattern frequencies')
xlabel(param_name_plot)
xlim(minmax)
ylabel('Relative frequency')
tight_layout()
utils.saveplot('change_patterns_%s.%s'%(param_name_u,ftype))
if data.__contains__('EvokedPred'):
# Reps x Params x Words x Step x pinv/base
max_step = shape(data.EvokedPred)[-2]#15 #-word_length
pred_spont = data.EvokedPred[:,:,:,:,0]
pred_base = data.EvokedPred[:,:,:,:,1]
for p in range(N_params):
inputi = data.InputIndex[0,p]
filename = os.path.join(pickle_dir,
"source_%s_%s_%.3f.pickle"
%(data.c.stats.quenching[0],
param_name,params[p]))
source = pickle.load(gzip.open(filename,"r"))
if isinstance(source,TrialSource):
source = source.source
word_lengths = [len(x) for x in source.words]
word_length = max(word_lengths)
figure()
axvspan(0,word_length-1,color='#E6E6E6')
secondstim_start = word_length # length of first word
secondstim_stop = word_length # length of second word
if data.c.stats.quenching[0] == 'test':
secondstim_start += data.c.wait_min_test[0]
secondstim_stop += data.c.wait_var_test[0]
elif data.c.stats.quenching[0] == 'train':
secondstim_start += data.c.wait_min_train[0]
secondstim_stop += data.c.wait_var_train[0]
else:
secondstim_start = x.max() # ugly and I know it
secondstim_stop = x.max()+secondstim_start
secondstim_stop += secondstim_start
axvspan(secondstim_start,secondstim_stop,facecolor='w',
edgecolor='#E6E6E6',
linewidth=0,hatch="x")
from scipy.stats import nanmean
pred_spont_p = nanmean(pred_spont[:,p,:,:max_step],1)
pred_base_p = nanmean(pred_base[:,p,:,:max_step],1)
x = arange(shape(pred_spont_p)[1])
errorbar(x,mean(pred_spont_p,0),
std(pred_spont_p,0)/sqrt(N_iterations),color='b',
label='Spont. pred.')
hold('on')
errorbar(x,mean(pred_base_p,0),
std(pred_base_p,0)/sqrt(N_iterations),
color='#808080',label='Baseline')
y_lim = ylim()
ylim(y_lim)
xlim([x.min(),x.max()])
legend(loc='best')
xlabel('Step after stimulus onset')
ylabel('Pearson correlation to evoked response')
#~ suptitle('%s = %.2f'%(param_name,params[p]))
tight_layout()
utils.saveplot('evokedpred_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[p],ftype))
# Assuming identical word length for shaded areas
figure()
axvspan(0,word_length-1,color='#E6E6E6')
axvspan(secondstim_start,secondstim_stop,facecolor='w',
edgecolor='#E6E6E6',
linewidth=0,hatch="x")
# Roll to get frac_A=1 to front (A first letter in alphabet and
# evokedpred sorted by letters)
frac_A = roll(data.c.frac_A[0],1)
for (i,frac) in enumerate(frac_A):
errorbar(x,mean(pred_spont[:,:,i],1).mean(0),
mean(pred_spont[:,:,i],1).std(0)/sqrt(N_iterations),
label='%.2fA'%frac)
ylabel('Pearson correlation to evoked response')
xlabel('Step after stimulus onset')
legend(loc='best')
tight_layout()
utils.saveplot('evokedpred_byword_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
if data.__contains__('Bayes'):
# Remove all-zero returned matrices and matrices with
# values >+-10 from failed SVD and values==0 from failed SVD
from scipy.interpolate import interp1d
bayes = np.zeros(shape(data.Bayes)[1:])
bayes_std = np.zeros(shape(data.Bayes)[1:])
for p in range(N_params):
tmp = []
for i in range(N_iterations):
if not (any(data.Bayes[i,p]>10) or
any(data.Bayes[i,p]<-10) or
all(data.Bayes[i,p] == 0)):
tmp.append(data.Bayes[i,p])
assert(not tmp == [])
bayes[p] = mean(array(tmp),0)
bayes_std[p] = std(array(tmp),0)/sqrt(N_iterations)
frac_A = data.c.frac_A[0]
'''
Linearly interpolate the crossing point between curves Y1 and Y2.
This assumes that Y1 starts off smaller than Y2.
It returns the first intersection point; if there is no
intersection, it returns the x-value at whichever end of the
interval Y1 and Y2 are closest to each other.
'''
def get_crossing(x,Y1,Y2,N_points=1000):
precise_x = np.linspace(x.min(),x.max(),N_points)
f_y1 = interp1d(x,Y1)
f_y2 = interp1d(x,Y2)
y_y1 = f_y1(precise_x)
y_y2 = f_y2(precise_x)
crossing = where(y_y1>y_y2)
if shape(crossing)[1]>0:
crosspoint = crossing[0][0]
else:
if abs((Y1[-1]-Y2[-1])) < abs((Y1[0]-Y2[0])):
crosspoint = N_points-1
else:
crosspoint = 0
return precise_x[crosspoint]
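# Illustrative check: for Y1 = [0.2, 0.8] and Y2 = [0.8, 0.2] on
# x = [0, 1] the curves cross at x = 0.5, so
# get_crossing(array([0, 1]), array([0.2, 0.8]), array([0.8, 0.2]))
# returns approximately 0.5.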
raw_crossings = zeros((N_params,N_iterations))
for i in range(N_params):
for j in range(N_iterations):
raw_crossings[i,j] = get_crossing(frac_A,
data.Bayes[j,i,:,4],data.Bayes[j,i,:,5])
crossings = mean(raw_crossings,1)
crossings_std = std(raw_crossings,1)
# Fit optimal model
from chartmann.spont.optimal_channels import OptimalChannels
channels = OptimalChannels(N_u=data.c.N_u_e[0])
N_As = (frac_A*data.c.N_u_e[0]).round().astype(int)
def opt_wrapper(x,p_uA_given_A,p_uA_given_B,p_A):
channels.p_uA_given_A = p_uA_given_A
channels.p_uB_given_B = p_uA_given_A
channels.p_uA_given_B = p_uA_given_B
channels.p_uB_given_A = p_uA_given_B
return channels.optimal_inference(p_A=p_A,N_As=x,
N_samples=10000)
N_optprobs = int(round(0.9/0.05))+1
ps_uA_given_A = linspace(0.05,0.95,N_optprobs)
ps_uA_given_B = linspace(0.05,0.95,N_optprobs)
best_so_far = inf
'''
Parameter symmetries:
if:
p_uA_given_A = a
p_uA_given_B = b
then the following combinations give the same result:
p_uA_given_A = b
p_uA_given_B = a
and
p_uA_given_A = 1-a
p_uA_given_B = 1-b
Intuitions for why the information is conserved:
(1-a, 1-b) -> simply reinterpret a failed transmission as a success
(b, a) -> simply a renaming of the variables
'''
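# Concrete example of the symmetry (illustrative): the pairs
# (p_uA_given_A, p_uA_given_B) = (0.7, 0.2), (0.2, 0.7) and (0.3, 0.8)
# all describe the same channel, which is presumably why the grid
# search below only visits combinations with pAB <= pAA.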
for pAA in ps_uA_given_A:
for pAB in ps_uA_given_B[ps_uA_given_B<=pAA]:
dists = zeros((N_params,N_iterations))
for i in range(N_params):
y_est = opt_wrapper(N_As,pAA,pAB,params[i])
for j in range(N_iterations):
# least squares
dists[i,j] = np.linalg.norm(data.Bayes[j,i,:,4]-y_est)**2
dist = mean(dists)
if dist<best_so_far:
p_uA_given_A = pAA
p_uA_given_B = pAB
best_so_far = dist
#~ p_uA_given_A = 0.3
#~ p_uA_given_B = 0.05
fitted_channels = OptimalChannels(p_uA_given_A=p_uA_given_A,
p_uA_given_B=p_uA_given_B,
N_u=data.c.N_u_e[0])
opt_posteriors = zeros((N_params,len(frac_A)))
opt_crossings = zeros(N_params)
for i in range(N_params):
# Many samples for pretty plots
opt_posteriors[i,:] = fitted_channels.optimal_inference(
p_A=params[i],N_As=N_As,N_samples=10000)
opt_crossings[i] = get_crossing(frac_A,opt_posteriors[i],
1-opt_posteriors[i])
for i in range(N_params):
fig, ax = plt.subplots()
errorbar(frac_A,bayes[i,:,0],bayes_std[i,:,0],fmt='-b',
label='Decision A')
hold('on')
errorbar(frac_A,bayes[i,:,1],bayes_std[i,:,1],fmt='-g',
label='Decision B')
ylim([0,1])
xlim([0,1])
#~ title('%s = %.2f'%(param_name,params[i]))
tmp = 1-params[i]
plot([tmp,tmp],[0,1],'--k',label='1-prior(A)')
legend(loc='upper center')
xlabel('Fraction of cue A in ambiguous cue')
ylabel('Output gain +- stderr')
utils.saveplot('bayes_drive_%s_%f.%s'%(param_name_u,
params[i],ftype))
figure()
# Lines for optimality explanation before data for overlap
# Old/wrong optimal lines
#~ tmp = 1-params[i]
#~ plot([tmp,tmp],[0,1],'--k',label='1-prior(A)')
#~ hold('on')
#~ denom = frac_A*params[i]+frac_A[::-1]*(1-params[i])
#~ plot(frac_A,frac_A*params[i]/denom,'-', color='#808080', \
#~ label='Optimal')
#~ plot(frac_A,frac_A[::-1]*(1-params[i])/denom,'-',\
#~ color='#808080')
plot(frac_A,opt_posteriors[i],'--', color='#808080',
label='Prob. model')
plot(frac_A,1-opt_posteriors[i],'--', color='#808080')
# Actual data here
errorbar(frac_A,bayes[i,:,4],bayes_std[i,:,4],fmt='-b',\
label='Decision A')
hold('on')
errorbar(frac_A,bayes[i,:,5],bayes_std[i,:,5],fmt='-g',\
label='Decision B')
ylim([0,1])
xlim([0,1])
#~ title('%s = %.2f'%(param_name,params[i]))
# Reorder labels b/c ugly
ax = gca()
handles, labels = ax.get_legend_handles_labels()
labels = [z for z in array(labels)[[1,2,0]]]
handles = [z for z in array(handles)[[1,2,0]]]
leg = ax.legend(handles, labels, loc='best')
leg.get_frame().set_alpha(0.5)
#~ legend(loc='best')
xlabel('Fraction of A in ambiguous stimulus')
ylabel('Fraction of decisions')
#~ if i < (N_params-1):
#~ utils.saveplot('bayes_dec_%s_%f.%s'
#~ %(param_name_u,params[i],ftype))
utils.saveplot('bayes_dec_frac_%s_%f.%s'
%(param_name_u,params[i],ftype))
figure()
plot(1-params[:,0],opt_crossings,'--', color='#808080',
label='Prob. model')
errorbar(1-params[:,0],crossings,
crossings_std/sqrt(N_iterations),fmt='o-',
label='Intersection')
#~ plot([tmp,tmp],[0,1],'--k')
# Reorder labels b/c ugly
ax = gca()
handles, labels = ax.get_legend_handles_labels()
labels = [x for x in array(labels)[[1,0]]]
handles = [x for x in array(handles)[[1,0]]]
leg = ax.legend(handles, labels, loc='best')
leg.get_frame().set_alpha(0.5)
ylim([0,1])
xlim([0,1])
xlabel('1 - ('+param_name_plot+')')
ylabel('Intersection of decisions')
tight_layout() # for suplot spacing
utils.saveplot('bayes_dec_intersect_%s.%s'
%(param_name_u,ftype))
figure()
title('Fitted Parameters')
text(2,7,'p(transmission|input) = %.2f'%p_uA_given_A,
fontsize=20)
text(2,3,'p(transmission|noinput) = %.2f'%p_uA_given_B,
fontsize=20)
ylim([0,10])
xlim([0,10])
ax = gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
utils.saveplot('parameters_channelmodel_%s.%s'%(param_name_u,
ftype))
#~ import ipdb; ipdb.set_trace()
if data.__contains__('SpontBayes'):
sb = data.SpontBayes
# over all conditions: check if higher-than-mean readout
# corresponds to higher-than-mean activation of input units
mean_readout = mean(mean(sb,0),2)[:,:,2:]
mean_act = mean(mean(sb,0),2)[:,:,:2]
n_conditions = shape(sb)[2]
relative_effect = np.zeros((N_params,n_conditions,2))
excess = np.zeros((N_params,2))
for param in range(N_params):
for i in range(n_conditions):
indices_0 = where(sb[:,param,i,:,2]
>mean_readout[param,i,0])
indices_1 = where(sb[:,param,i,:,3]
>mean_readout[param,i,1])
# ugly mean computation
vals_0 = []
vals_1 = []
for j in range(shape(indices_0)[1]):
vals_0.append(sb[indices_0[0][j],param,i,
indices_0[1][j],0])
for j in range(shape(indices_1)[1]):
vals_1.append(sb[indices_1[0][j],param,i,
indices_1[1][j],1])
relative_effect[param,i,0] = mean(array(vals_0))\
/mean_act[param,i,0]
relative_effect[param,i,1] = mean(array(vals_1))\
/mean_act[param,i,1]
excess[param,0] = mean((mean_act[param,:,0]-
frac_A*data.c.N_u_e[0]))
excess[param,1] = mean((mean_act[param,:,1]-
frac_A[::-1]*data.c.N_u_e[0]))
figure()
boxplot(relative_effect.flatten()*100-100)
hold('on')
plot([0.75,1.25],[0,0],'--k')
title('Effect of above-average readout on input activity')
ylabel('Increased input activity [%]')
xlabel('Collapsed over all values of the %s'%param_name_plot)
xticks([])
xlim([0.75,1.25])
utils.saveplot('spontbayes_box_%s_%f.%s'
%(param_name_u,params[param],ftype))
figure()
plot(params,excess[:,0],'-b',label='A units')
hold('on')
plot(params,excess[:,1],'-g',label='B units')
xlim([0,1])
legend(loc = 'upper center')
xlabel(param_name_plot)
ylabel('Mean excess activity over all stimuli')
utils.saveplot('spontbayes_excess_%s_%f.%s'
%(param_name_u,params[param],ftype))
if data.__contains__('TrialBayes'):
filename = os.path.join(pickle_dir,"source_%s_%s_%.3f.pickle"\
%('test',param_name,params[0]))
source = pickle.load(gzip.open(filename,"r"))
if isinstance(source,TrialSource):
source = source.source
word_lengths = [len(x) for x in source.words]
word_length = max(word_lengths)
agreements = np.zeros((N_iterations*N_params,2,\
shape(data.TrialBayes)[3]))
count = 0
ig = 30 # only use the last ig steps
# for the time being:
forward_pred = data.c.stats.forward_pred[0]
x = arange(-ig+forward_pred,forward_pred)
for i in range(N_params):
tmp = data.TrialBayes[:,i,0,:]*100
nonzero_set = set(where(tmp!=-100)[0])
nonzero_list = [n for n in nonzero_set]
trials = len(nonzero_list)
tmp = tmp[nonzero_list]
agreements[count:count+trials,0,:] = tmp
tmp = tmp[:,-ig:]
agreement = mean(tmp,0)
agreement_sem = std(tmp,0)/sqrt(trials)
tmp_lstsq = data.TrialBayes[:,i,1,:]*100
tmp_lstsq = tmp_lstsq[nonzero_list]
agreements[count:count+trials,1,:] = tmp_lstsq
tmp_lstsq = tmp_lstsq[:,-ig:] # ignore the last stim
agreement_lstsq = mean(tmp_lstsq,0)
agreement_lstsq_sem = std(tmp_lstsq,0)/sqrt(trials)
count += len(nonzero_list)
figure()
errorbar(x,agreement,agreement_sem,color='#808080',
label='Baseline')
hold('on')
errorbar(x,agreement_lstsq,agreement_lstsq_sem,color='b',
label='Spont. prediction')
y_lim = ylim()
x_lim = xlim()
axvspan(0,word_length-1,color='#E6E6E6')
plot([word_length-1,word_length-1],y_lim,'--g',
label='Pred. position')
ylim(y_lim)
xlim(x_lim)
xlabel('Step relative to stimulus onset')
title('%s = %.2f'%(param_name_plot,params[i]))
ylabel('Correct predictions [%]')
utils.saveplot('trialbayes_%.2f_%s.%s'\
%(params[i],data.c.stats.file_suffix[0],ftype))
agreements = agreements[:count,:,-ig:]
figure()
errorbar(x,mean(agreements[:,0,:],0),
std(agreements[:,0,:],0)/sqrt(count),color='#808080',
label='Baseline')
errorbar(x,mean(agreements[:,1,:],0),
std(agreements[:,1,:],0)/sqrt(count),color='b',
label='Spont. prediction')
y_lim = ylim()
axvspan(0,word_length-1,color='#E6E6E6')
plot([word_length-1,word_length-1],y_lim,'--g',
label='Pred. position')
legend(loc='upper left')
xlabel('Step relative to stimulus onset')
ylabel('Correct predictions [%]')
utils.saveplot('trialbayes_average_%s.%s'\
%(data.c.stats.file_suffix[0],ftype))
# quenching variability
if data.__contains__('Spikes') and \
data.c.stats.quenching is not None:
spikes_before = 10
spikes_after = 10
# number of bins left and right of t (2 -> boxwidth=5)
window_width = data.c.stats.quenching_window[0]
weighted_regression = True
mode = data.c.stats.quenching[0]
assert(mode == 'train' or mode == 'test')
# Get N_words for array
filename = os.path.join(pickle_dir,"source_%s_%s_%.3f.pickle"\
%(mode,param_name,params[0]))
source = pickle.load(gzip.open(filename,"r"))
if isinstance(source,TrialSource):
source = source.source
word_lengths = [len(x) for x in source.words]
max_word_length = max(word_lengths)
N_words = len(source.words)
total_length = max_word_length + spikes_before + spikes_after
# Look at last half of training set
steps_plastic = data.c.steps_plastic[0]
steps_noplastic_train = data.c.steps_noplastic_train[0]
steps_noplastic_test = data.c.steps_noplastic_test[0]
if mode == 'train':
interval = [-steps_noplastic_train-steps_noplastic_test,
-steps_noplastic_test]
else: # test because of assert
interval = [steps_plastic+steps_noplastic_train,-1]
# same order as all: first it, then params
FF = np.zeros((N_iterations,N_params,N_words,
total_length-2*window_width))
means = np.zeros((N_iterations,N_params,N_words,
total_length-2*window_width))
allvars = np.zeros((N_iterations,N_params,N_words,
total_length-2*window_width))
if matlab_comparison:
try:
from mlabwrap import mlab
except ImportError:
matlab_comparison = False
if matlab_comparison:
mlab.addpath(
'/home/chartmann/Desktop/sorn/py/chartmann/spont/')
FFs_mlab = np.zeros((N_iterations,N_params,total_length-7))
means_mlab = np.zeros((N_iterations,N_params,
total_length-7))
meansAll_mlab = np.zeros((N_iterations,N_params,
total_length-7))
for p in range(N_params):
for i in range(N_iterations):
input_spikes = data.Spikes[i,p][:,
interval[0]:interval[1]]
input_index = data.InputIndex[i,p][
interval[0]:interval[1]]
# Determine minimum number of trials
min_trials = inf
word_start = 0
for j in range(N_words):
indices = find(input_index==word_start)
tmp_trials = sum((indices >= spikes_before)*\
(indices <= shape(input_index)[0]
-spikes_after))
if tmp_trials < min_trials:
min_trials = tmp_trials
word_start += word_lengths[j]
# build trial matrix (condition x trial x t x spikes)
N_e = shape(input_spikes)[0]
trials = np.zeros((N_words,min_trials,total_length,N_e))
word_start = 0
for word in range(N_words):
indices = find(input_index==word_start)
indices = indices[((indices >= spikes_before) *
(indices <= shape(input_index)[0]
-(spikes_after
+max_word_length))
)]
indices = indices[-min_trials:] # take from end
for (trial,j) in enumerate(indices):
trials[word,trial,:,:] = input_spikes[:,
j-spikes_before:j
+max_word_length
+spikes_after].T
word_start += word_lengths[word]
# Determine units that do not receive input
noinput_units = arange(N_e)[data.InputUnits[i,p]==0]
if matlab_comparison:
result = mlab.VarVsMean_pythontomat_bulk(trials[:,:,
:,noinput_units])
N = 1
FFs_mlab[i,p] = result[:,:N].T
means_mlab[i,p] = result[:,3*N:4*N].T/1000.
meansAll_mlab[i,p] = result[:,7*N:8*N].T/1000.
for word in range(N_words):
for (t_i,t) in enumerate(arange(0,
total_length-2*window_width)):
# This procedure follows figure 4 of the quenching-
# variability paper:
# regress variances against means across all
# neurons in a small interval (here a single
# window position) over trials
# This is summing over the window
# This indexing reshapes to
# (neurons x trials x window)
count = sum(trials[word,:,t:
t+2*window_width+1,noinput_units],2)
meanss = mean(count,1)
means[i,p,word,t_i] = mean(meanss)
varss = std(count,1)**2
allvars[i,p,word,t_i] = mean(varss)
weighting = eye(shape(meanss)[0])
if weighted_regression:
for j in range(shape(meanss)[0]):
weighting[j,j] = min_trials/\
((meanss[j]+0.001)**2)
slope = np.dot(np.dot(meanss.T,weighting),\
varss)/np.dot(meanss.T,\
np.dot(weighting,meanss))
FF[i,p,word,t_i] = slope
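# For reference (restating the computation above): with diagonal
# weights W_jj ~ n_trials / mean_j**2, the weighted least-squares
# slope through the origin is
#     slope = (m^T W v) / (m^T W m)
# where m holds the per-neuron mean counts and v their variances;
# this slope is the Fano factor estimate stored in FF.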
x = linspace(-spikes_before+2*window_width,
spikes_after+max_word_length-1,
total_length-(window_width*2))
if matlab_comparison:
x_mlab = x[:shape(FFs_mlab)[2]]
def remove_axes(ax):
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.tick_params(axis='both', direction='out')
ax.get_yaxis().tick_left()
ax.get_xaxis().set_visible(False)
import matplotlib.gridspec as gridspec
lw = 3
figure()
gs = gridspec.GridSpec(2, 1,height_ratios=[0.4,0.6])
subplot(gs[0])
#~ ax = axes(frameon=False)
#~ ax.get_xaxis().set_visible(False)
mmeanall = mean(meansAll_mlab[:,p],0)
smeanall = std(meansAll_mlab[:,p],0)/sqrt(N_iterations)
mmean = mean(means_mlab[:,p],0)
smean = std(means_mlab[:,p],0)/sqrt(N_iterations)
mFF = mean(FFs_mlab[:,p],0)
sFF = std(FFs_mlab[:,p])/sqrt(N_iterations)
errorbar(x_mlab,mmeanall,yerr=smeanall,c='0.5',
label="Raw",lw=lw)
errorbar(x_mlab,mmean,yerr=smean,fmt='k',
label="'Matched'",lw=lw)
minmax = [min(hstack((mmeanall,mmean))),
max(hstack((mmeanall,mmean)))]
minmax[0] = round(minmax[0]-0.0049,3)
minmax[1] = round(minmax[1]+0.0049,3)
minmaxx = [x_mlab[0]-1,max(x_mlab)+0.2]
ylabel('Spikes/step')
gca().locator_params(axis='y',nbins=4) # 4 ticks/axis
remove_axes(gca())
legend(loc='best')
ylim(minmax)
xlim(minmaxx)
tight_layout()
subplot(gs[1])
plot(x_mlab,mFF,'k',label='FF',lw=lw)
plot(x_mlab,mFF-sFF,c='0.5',label='SEM',lw=lw)
plot(x_mlab,mFF+sFF,c='0.5',lw=lw)
quiver(-3,ylim()[0],0,0.1,scale=1,label='Stim on')
gca().locator_params(axis='y',nbins=4) # 4 ticks/axis
remove_axes(gca())
legend(loc='best')
ylabel('Fano Factor')
ylim([min(mFF-sFF)-0.01,max(mFF+sFF)+0.01])
xlim(minmaxx)
tight_layout()
utils.saveplot('quenching_word_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],
param_name_u,params[p],ftype))
FF[isnan(FF)] = 0 # NaNs usually occur around small values
# Rearrange to match frequency
FFnew = roll(FF,-1,axis=2)
for p in range(N_params):
for word in range(N_words):
# This is AX* and BX* (word starting with index 0 and
# index word_length because A,B first two letters in
# alphabet)
# This is accounted for in the Bayes stat by resorting
if word == 0 or word==1:
fig,axes = subplots(2, sharex=True)
ax1 = axes[1]
ax1.errorbar(x,mean(allvars[:,p,word,:],0),
std(allvars[:,p,word,:],0)\
/sqrt(N_iterations),fmt='b')
ax1.hold('on')
ax1.set_xlabel('Step')
ax1.yaxis.label.set_color('b')
y_lim = [min(flatten(mean(allvars[:,:,word,:],0))),
max(flatten(mean(allvars[:,:,word,:],0)))]
ax1.set_ylim(y_lim)
locator_params(axis='y',nbins=4) # 4 ticks/axis
ax2 = ax1.twinx()
ax2.errorbar(x,mean(means[:,p,word,:],0),
std(means[:,p,word,:],0)\
/sqrt(N_iterations),fmt='r')
ax2.yaxis.label.set_color('r')
ax2.set_ylabel('Mean rate')
locator_params(axis='y',nbins=4) # 4 ticks/axis
y_lim = [min(flatten(mean(means[:,:,word,:],0))),
max(flatten(mean(means[:,:,word,:],0)))]
hold('on')
ax1.axvspan(0,word_lengths[word]-1,color='#E6E6E6')
ylim(y_lim)
xlim([x.min(),x.max()])
ax1.set_ylabel('Variance')
ax = axes[0]
ax.errorbar(x,mean(FF[:,p,word,:],0),
std(FF[:,p,word,:],0)/sqrt(N_iterations)
,fmt='k')
ax.set_ylabel('Fano factor')
ax.locator_params(axis='y',nbins=4) # 4 ticks/axis
# yaxis identical for all parameters for each word
y_lim = [min(flatten(mean(FF[:,:,word,:],0))),
max(flatten(mean(FF[:,:,word,:],0)))]
hold('on')
ax.axvspan(0,word_lengths[word]-1,color='#E6E6E6')
ax.set_ylim(y_lim)
ax.legend(loc='lower left')
tight_layout()
# because tight_layout doesn't recognize twinx
fig.subplots_adjust(right=0.9)
utils.saveplot('quenching_word_%d_%s_%s_%.2f.%s'
%(word,data.c.stats.file_suffix[0],
param_name_u,params[p],ftype))
# suptitle('%s = %.2f'%(param_name,params[p]))
# Plot ambiguity vs. FF for each condition
if False:
minFFs = mean(FFnew.min(axis=3)[:,p],0)
stdminFFs = std(FFnew.min(axis=3)[:,p],0)/sqrt(N_iterations)
figure()
errorbar(frac_A,minFFs,stdminFFs,label='Fano factor')
y_lim = ylim()
axvline(1-params[p],color='k',linestyle='dashed',
label='1-prior(A)')
ylim(y_lim)
gca().locator_params(axis='y',nbins=4) # 4 ticks/axis
xlabel('Fraction of A in ambiguous stimulus')
ylabel('Minimal Fano factor')
legend(loc='best')
tight_layout()
xlim([-0.02,1.02])
utils.saveplot('quenching_vs_amb_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[p],ftype))
if False:
# Plot prior vs. max. FF
# For each stimulation condition:
# For each iteration, take the word
# that maximizes the minimal FF
# Then average over these words
frac_range = frac_A[-1]-frac_A[0]
averagefrac = mean(argmax(FFnew.min(axis=3),2)
/((len(frac_A)-1)/frac_range),0)
stdfrac = std(argmax(FFnew.min(axis=3),2)
/((len(frac_A)-1)/frac_range),0)/sqrt(N_iterations)
# Assume even spacing of frac_A
offset = frac_A[0]
averagefrac += offset
figure()
plot([frac_A[0],frac_A[-1]],[frac_A[0],frac_A[-1]],
color='#808080',label='Identity')
# Reverse to match 1-(prior(A))
errorbar(params,averagefrac[::-1],stdfrac[::-1],fmt='o-',
label='Fraction')
xlabel('1 - ('+param_name_plot+')')
ylabel('Fraction of A with highest variability')
legend(loc='best')
tight_layout()
xlim([0,1])
ylim([0,1])
utils.saveplot('quenching_vs_prior_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
if data.__contains__('AttractorDynamics'):
frac_A = data.c.frac_A[0]
for p in range(N_params):
output_dists = data.AttractorDynamics
figure()
# This is now frac x step (from cue to target)
mean_od = mean(output_dists[:,p,:,:],0).T[:,::-1]
std_od = std(output_dists[:,p,:,:],0).T[:,::-1]\
/sqrt(N_iterations)
x = arange(-shape(mean_od)[1]+1,1)
for (i,frac) in enumerate(frac_A):
errorbar(x,mean_od[i,:],std_od[i,:],label="%.2f"%frac)
ylabel('Distance between output gains')
xlabel('Steps before target')
legend()
utils.saveplot('attractordynamics_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[p],ftype))
if data.__contains__('OutputDist'):
output_dist = data.OutputDist[:,:,0,:]
output_std = data.OutputDist[:,:,1,:]
frac_A = data.c.frac_A[0]
for i in range(N_params):
figure()
errorbar(frac_A, mean(output_dist[:,i,:],0),
std(output_dist[:,i,:],0), fmt='o-')
ylim([0,1])
x_lim = xlim()
xlim([x_lim[0]-0.1,x_lim[1]+0.1])
xlabel('Fraction of A in ambiguous stimulus')
ylabel('Mean abs diff of normalized output gain +- std')
title('%s = %.2f - mean(min) = %.2f'
%(param_name_plot,params[i],
# get min for each trial and av.
mean(output_dist[:,i,:].min(1))))
utils.saveplot('outputdist_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[i],ftype))
figure()
errorbar(params,mean(mean(output_dist,2),0),
std(mean(output_dist,2),0)/sqrt(N_iterations),fmt='o-')
x_lim = xlim()
xlim([x_lim[0]-0.1,x_lim[1]+0.1])
xlabel(param_name_plot)
ylabel('Attractor score')
utils.saveplot('attractor_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[i],ftype))
# Plot evoked pred vs. FF (high FF should yield better ep)
# first normalize each iteration and param
if data.__contains__('EvokedPred') and 'FF' in locals():
diff = pred_spont[:,:,:,1] - pred_base[:,:,:,1]
FFs = FF[:,:,:,11]
for p in range(N_params):
for i in range(N_iterations):
diff[i,p] -= diff[i,p].min()
diff[i,p] /= diff[i,p].max()
FFs[i,p] -= FFs[i,p].min()
FFs[i,p] /= FFs[i,p].max()
FFs = FFs.flatten()
diff = diff.flatten()
figure()
scatter(FFs,diff)
(s,p) = stats.pearsonr(FFs,diff)
xlabel('Normalized Fano factor after stimulus onset')
ylabel('Normalized(spontpred - staticpred)')
title('p = %.4f'%p)
# Do linear regression fit
A = vstack((FFs,ones(shape(FFs)[0]))).T
w = pinv(A).dot(diff)
y = FFs*w[0]+w[1]
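# For reference: pinv(A).dot(diff) is the least-squares solution of A.dot(w) ~ diff,
# so w[0] is the slope and w[1] the intercept of the fitted line plotted below.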
tmp = zip(FFs,y)
tmp.sort()
tmp = array(tmp)
hold('on')
plot(tmp.T[0],tmp.T[1])
utils.saveplot('evokedpred_FF_%s.%s'\
%(data.c.stats.file_suffix[0],ftype))
# Finally plot EP vs. condition at step 1 (indistinguishable at step 0)
# see the method for how the predictions are sorted
# --> almost sorted, but not averaged etc.
if data.c.stats.quenching[0] == 'test' and False:
frac_A = data.c.frac_A[0]
for p in range(N_params):
pred_p = pred_spont[:,p,:,1]
# can mean here because order of means doesn't matter
pred_p = mean(pred_p,0) # over iterations
to_mean = pred_p[2:]
meaned = [mean([x,y]) for (x,y) in zip(to_mean[::2],
to_mean[1::2])]
# B, C, D, ..., A
pred_p = hstack((pred_p[1],array(meaned),pred_p[0]))
pred_s = pred_base[:,p,:,1]
pred_s = mean(pred_s,0)
to_mean = pred_s[2:]
meaned = [mean([x,y]) for (x,y) in zip(to_mean[::2],
to_mean[1::2])]
pred_s = hstack((pred_s[1],array(meaned),pred_s[0]))
figure()
plot(frac_A,pred_p,label='Pinv')
hold('on')
plot(frac_A,pred_s,label='STA')
xlabel('Fraction of A in ambiguous stimulus')
ylabel('Prediction')
suptitle('%s = %.2f'%(param_name_plot,params[p]))
legend()
utils.saveplot('evokedpred_fracA_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],
param_name_u,params[p],ftype))
if data.__contains__('patternprobability'):
def KL(p,q):
# in case zero-correction was deactivated
q = q[p>0]
p = p[p>0]
p = p[q>0]
q = q[q>0]
q /= sum(q)
p /= sum(p)
# numpy form with an explicit 0*log(0) = 0 convention
kl = np.sum(np.where(p != 0, p * np.log2(p / q), 0))
return kl
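# Worked example for orientation: for p = [0.5, 0.5] and q = [0.9, 0.1],
# KL(p,q) = 0.5*log2(0.5/0.9) + 0.5*log2(0.5/0.1) ~ 0.74 bits, while
# KL(q,p) ~ 0.53 bits; the asymmetry is why both directions are computed below.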
kl_evoked1_spont = zeros((N_params,N_iterations))
kl_spont_evoked1 = zeros((N_params,N_iterations))
kl_evoked_12 = zeros((N_params,N_iterations))
kl_evoked_21 = zeros((N_params,N_iterations))
kl_spont_12 = zeros((N_params,N_iterations))
kl_spont_21 = zeros((N_params,N_iterations))
kl_exp_spont = zeros((N_params,N_iterations))
kl_con_spont = zeros((N_params,N_iterations))
for p in range(N_params):
for i in range(N_iterations):
p_evoked_1 = data.patternprobability[i,p][0]
p_evoked_2 = data.patternprobability[i,p][1]
p_spont_1 = data.patternprobability[i,p][2]
p_spont_2 = data.patternprobability[i,p][3]
p_spont = (p_spont_1+p_spont_2)/2
kl_evoked1_spont[p,i] = KL(p_evoked_1,p_spont)
kl_spont_evoked1[p,i] = KL(p_spont,p_evoked_1)
kl_evoked_12[p,i] = KL(p_evoked_1,p_evoked_2)
kl_evoked_21[p,i] = KL(p_evoked_2,p_evoked_1)
kl_spont_12[p,i] = KL(p_spont_1,p_spont_2)
kl_spont_21[p,i] = KL(p_spont_2,p_spont_1)
kl_exp_spont[p,i] = KL(p_evoked_1,p_spont)
kl_con_spont[p,i] = KL(p_evoked_2,p_spont)
figure()
bar([1,2,3],[mean(kl_evoked1_spont[p]),mean(kl_evoked_12[p]),
mean(kl_spont_12[p])],yerr=[
std(kl_evoked1_spont[p]),std(kl_evoked_12[p]),
std(kl_spont_12[p])],align='center')
xticks([1,2,3],['$D(e||s)$','$D(e||e)$','$D(s||s)$'])
ylabel('KL-Divergence')
title('%s = %s'%(param_name_u,params[p]))
xlim([0.5,3.5])
utils.saveplot('KLdiv_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[p],ftype))
figure()
x = arange(len(params))
bar(x,mean(kl_evoked1_spont,1),
yerr=std(kl_evoked1_spont,1)/sqrt(N_iterations),
align='center')
xticks(x,['%d'%p for p in params],rotation=30,ha='right')
ylabel('KL-Divergence $D(e||s)$')
xlabel(param_name_plot)
utils.saveplot('KLdiv_%s.%s'\
%(data.c.stats.file_suffix[0],ftype))
# Figure assuming first and second half of evoked are
# experiment and control, respectively
figure()
x = arange(len(params)*2)[::2]
dx = 0.4
bar(x-dx,mean(kl_exp_spont,1),
yerr=std(kl_exp_spont,1)/sqrt(N_iterations),
align='center',color='r',linewidth=2,ecolor='k',
label='Natural')
bar(x+dx,mean(kl_con_spont,1),
yerr=std(kl_con_spont,1)/sqrt(N_iterations),
align='center',color='g',linewidth=2,ecolor='k',
label='Control')
for p in range(N_params):
label_diff(x[p]-dx,x[p]+dx,kl_exp_spont[p],
kl_con_spont[p],gca())
xticks(x[::2],[' %d'%(p//1000) for p in params[::2]],
ha='center')
ylabel('KL-Divergence $D(e||s)$')
legend(loc='best')
if param_name == 'steps_plastic':
param_name_plotting = 'Steps with plasticity [$*10^3$]'
else:
param_name_plotting = param_name
xlabel(param_name_plotting)
tight_layout()
utils.saveplot('KLdiv_new_%s.%s'\
%(data.c.stats.file_suffix[0],ftype))
if __name__ == '__main__':
plot_results(path, datafile)
show()
|
mit
|
jwlawson/tensorflow
|
tensorflow/examples/tutorials/input_fn/boston.py
|
76
|
2920
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def get_input_fn(data_set, num_epochs=None, shuffle=True):
return tf.estimator.inputs.pandas_input_fn(
x=pd.DataFrame({k: data_set[k].values for k in FEATURES}),
y=pd.Series(data_set[LABEL].values),
num_epochs=num_epochs,
shuffle=shuffle)
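# Usage note: get_input_fn(training_set) returns a callable that the Estimator
# invokes to get batches of ({feature_name: values}, labels); with
# num_epochs=None the data repeats indefinitely, so train() below is capped
# by steps=5000 instead.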
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Train
regressor.train(input_fn=get_input_fn(training_set), steps=5000)
# Evaluate loss over one epoch of test_set.
ev = regressor.evaluate(
input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False))
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions over a slice of prediction_set.
y = regressor.predict(
input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False))
# .predict() returns an iterator of dicts; convert to a list and print
# predictions
predictions = list(p["predictions"] for p in itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
|
apache-2.0
|
permcody/libmesh
|
doc/statistics/libmesh_sflogos.py
|
7
|
6095
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Import stuff for working with dates
from datetime import datetime
from matplotlib.dates import date2num
# SF.net pages and SFLogo Impressions.
# On the site, located under "Sourceforge Traffic". Number of logos
# (last column) seems to be the most useful one.
#
# This view should give you the last 12 months data
# https://sourceforge.net/project/stats/detail.php?group_id=71130&ugn=libmesh&mode=12months&type=sfweb
# This data has now changed to Google-analytics style...
# After you select the proper date range, scroll down to the bottom
# of the screen and it should show the totals for the two categories,
# which are listed as "SF Logo" and "other"
# Other SF Logo
data = [
'Jan 2003', 681, 479,
'Feb 2003', 659, 1939,
'Mar 2003', 488, 1754,
'Apr 2003', 667, 3202,
'May 2003', 608, 2552,
'Jun 2003', 562, 2190,
'Jul 2003', 997, 3097,
'Aug 2003', 745, 4708,
'Sep 2003', 906, 4937,
'Oct 2003', 892, 6834,
'Nov 2003', 1257, 8495,
'Dec 2003', 1147, 6439,
'Jan 2004', 823, 7791,
'Feb 2004', 906, 8787,
'Mar 2004', 979, 11309,
'Apr 2004', 835, 9393,
'May 2004', 851, 9796,
'Jun 2004', 750, 9961,
'Jul 2004', 618, 6337,
'Aug 2004', 912, 6647,
'Sep 2004', 554, 5736,
'Oct 2004', 524, 6144,
'Nov 2004', 685, 8122,
'Dec 2004', 583, 6136,
'Jan 2005', 215, 2668,
'Feb 2005', 732, 7618,
'Mar 2005', 944, 10283,
'Apr 2005', 837, 9605,
'May 2005', 1420, 9994,
'Jun 2005', 1691, 12031,
'Jul 2005', 849, 6740,
'Aug 2005', 1068, 11771,
'Sep 2005', 1119, 11459,
'Oct 2005', 772, 8614,
'Nov 2005', 845, 9383,
'Dec 2005', 814, 10606,
'Jan 2006', 1004, 11511,
'Feb 2006', 819, 10693,
'Mar 2006', 1097, 11925,
'Apr 2006', 960, 15664,
'May 2006', 1091, 14194,
'Jun 2006', 906, 12118,
'Jul 2006', 1022, 8935,
'Aug 2006', 914, 9370,
'Sep 2006', 1087, 11397,
'Oct 2006', 1311, 11516,
'Nov 2006', 1182, 10795,
'Dec 2006', 811, 9418,
'Jan 2007', 1236, 11522,
'Feb 2007', 1410, 10669,
'Mar 2007', 1568, 13141,
'Apr 2007', 1544, 12285,
'May 2007', 1362, 14992,
'Jun 2007', 2229, 17716,
'Jul 2007', 1822, 15192,
'Aug 2007', 1446, 12300,
'Sep 2007', 2045, 19599,
'Oct 2007', 2680, 14694,
'Nov 2007', 2344, 15211,
'Dec 2007', 2235, 10683,
'Jan 2008', 1582, 11290,
'Feb 2008', 1712, 12376,
'Mar 2008', 1908, 13204,
'Apr 2008', 2308, 13046,
'May 2008', 2013, 10312,
'Jun 2008', 2082, 11522,
'Jul 2008', 1880, 10859,
'Aug 2008', 2083, 11677,
'Sep 2008', 1739, 11446,
'Oct 2008', 2546, 13463,
'Nov 2008', 2152, 14491,
'Dec 2008', 2600, 15275,
'Jan 2009', 1897, 12910,
'Feb 2009', 1880, 12008,
'Mar 2009', 6348, 12696,
'Apr 2009', 1799, 14048,
'May 2009', 1771, 13122,
'Jun 2009', 1811, 12114,
'Jul 2009', 1878, 13600,
'Aug 2009', 2047, 10828,
'Sep 2009', 2807, 12914,
'Oct 2009', 4025, 17326,
'Nov 2009', 3702, 15648,
'Dec 2009', 3409, 12510,
'Jan 2010', 3737, 31211,
'Feb 2010', 5015, 28772,
'Mar 2010', 5652, 17882,
'Apr 2010', 4019, 17495,
'May 2010', 3336, 18117,
'Jun 2010', 2174, 21288,
'Jul 2010', 874, 13900,
'Aug 2010', 1160, 15153,
'Sep 2010', 1317, 13836,
'Oct 2010', 3543, 15279,
'Nov 2010', 3072, 18663,
'Dec 2010', 2257, 16381,
'Jan 2011', 2513, 19798,
'Feb 2011', 1678, 17870,
'Mar 2011', 1878, 17759,
'Apr 2011', 1948, 21264,
'May 2011', 2696, 15953,
'Jun 2011', 1514, 18409,
'Jul 2011', 1422, 13071,
'Aug 2011', 906, 7857,
'Sep 2011', 976, 9764,
'Oct 2011', 1699, 13285,
'Nov 2011', 1952, 16431,
'Dec 2011', 2735, 17849,
'Jan 2012', 1741, 14358,
'Feb 2012', 1017, 14262,
'Mar 2012', 1361, 14379,
'Apr 2012', 967, 15483,
'May 2012', 2384, 13656,
'Jun 2012', 1337, 14370,
'Jul 2012', 2107, 17286,
'Aug 2012', 8165, 53331,
'Sep 2012', 2268, 14704,
'Oct 2012', 738, 7333, # No data recorded from Oct 10 thru 28?
'Nov 2012', 6104, 39650,
'Dec 2012', 3439, 24706, # libmesh switched to github Dec 10, 2012
'Jan 2013', 2552, 31993,
'Feb 2013', 2107, 24913,
'Mar 2013', 1376, 23953,
'Apr 2013', 1582, 19285,
'May 2013', 1257, 16753,
'Jun 2013', 482, 14458,
'Jul 2013', 465, 11325,
'Aug 2013', 306, 7653,
'Sep 2013', 731, 11332,
'Oct 2013', 795, 15619,
'Nov 2013', 753, 16199,
'Dec 2013', 593, 11596,
'Jan 2014', 489, 11195,
'Feb 2014', 484, 14375,
'Mar 2014', 363, 13050,
'Apr 2014', 357, 15700, # As of June 1, 2014 the site above no longer exists...
]
# Extract list of date strings
date_strings = data[0::3]
# Convert date strings into numbers
date_nums = []
for d in date_strings:
date_nums.append(date2num(datetime.strptime(d, '%b %Y')))
# Strip out number of logos/month for plotting
n_logos_month = data[2::3]
# Scale by 1000
n_logos_month = np.divide(n_logos_month, 1000.)
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Make the bar chart. One number/month, so width=30
# makes sense.
ax.bar(date_nums, n_logos_month, width=30, color='b')
# Set tick labels at desired locations
xticklabels = ['Jan\n2003', 'Jan\n2005', 'Jan\n2007', 'Jan\n2009', 'Jan\n2011', 'Jan\n2013']
# Get numerical values for the tick labels
tick_nums = []
for x in xticklabels:
tick_nums.append(date2num(datetime.strptime(x, '%b\n%Y')))
ax.set_xticks(tick_nums)
ax.set_xticklabels(xticklabels)
# Make x-axis tick marks point outward
ax.get_xaxis().set_tick_params(direction='out')
# Set the xlimits
plt.xlim(date_nums[0], date_nums[-1]+30);
# Create title
fig.suptitle('SFLogo Pages/Month (in Thousands)')
# Save as PDF
plt.savefig('libmesh_sflogos.pdf')
# Local Variables:
# python-indent: 2
# End:
|
lgpl-2.1
|
btgorman/RISE-power-water-ss-1phase
|
data_water/supplemental/demands/annual/ver_averages.py
|
1
|
4214
|
import matplotlib.pyplot as plt
import numpy as np
import math
monthly_annual_demand_scalar = {
1: 064.,
2: 065.,
3: 070.,
4: 090.,
5: 110.,
6: 140.,
7: 153.,
8: 130.,
9: 120.,
10: 100.,
11: 080.,
12: 074.,
}
monthly_seasonal_demand_scalar = {
1: 0.,
2: 0.,
3: 0.,
4: 0.,
5: 0.,
6: 0.,
7: 0.,
8: 0.,
9: 0.,
10: 0.,
11: 0.,
12: 0.,
}
monthly_summer_average = 0.01*(monthly_annual_demand_scalar[6] + monthly_annual_demand_scalar[7] + monthly_annual_demand_scalar[8]) / 3
monthly_winter_average = 0.01*(monthly_annual_demand_scalar[1] + monthly_annual_demand_scalar[2] + monthly_annual_demand_scalar[12]) / 3
monthly_spring_average = 0.01*(monthly_annual_demand_scalar[3] + monthly_annual_demand_scalar[4] + monthly_annual_demand_scalar[5]) / 3 # spring months: Mar, Apr, May
monthly_fall_average = 0.01*(monthly_annual_demand_scalar[9] + monthly_annual_demand_scalar[10] + monthly_annual_demand_scalar[11]) / 3
for item in monthly_seasonal_demand_scalar:
if item == 6 or item == 7 or item == 8:
monthly_seasonal_demand_scalar[item] = monthly_annual_demand_scalar[item] / monthly_summer_average
if item == 12 or item == 1 or item == 2:
monthly_seasonal_demand_scalar[item] = monthly_annual_demand_scalar[item] / monthly_winter_average
if item == 3 or item == 4 or item == 5:
monthly_seasonal_demand_scalar[item] = monthly_annual_demand_scalar[item] / monthly_spring_average
if item == 9 or item == 10 or item == 11:
monthly_seasonal_demand_scalar[item] = monthly_annual_demand_scalar[item] / monthly_fall_average
print(monthly_seasonal_demand_scalar)
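# Worked example: monthly_summer_average = 0.01*(140+153+130)/3 = 1.41, so the
# July entry becomes 153/1.41 ~ 108.5, i.e. each seasonal scalar is a percentage
# relative to the average month of its season.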
hourly_summer_demand_scalar = {
1: 100.,
2: 095.,
3: 090.,
4: 100.,
5: 110.,
6: 130.,
7: 150.,
8: 175.,
9: 200.,
10: 210.,
11: 200.,
12: 175.,
13: 150.,
14: 147.,
15: 145.,
16: 170.,
17: 200.,
18: 225.,
19: 250.,
20: 263.,
21: 275.,
22: 263.,
23: 250.,
24: 100.,
}
hourly_winter_demand_scalar = {
1: 050.,
2: 045.,
3: 040.,
4: 045.,
5: 050.,
6: 058.,
7: 065.,
8: 073.,
9: 080.,
10: 073.,
11: 065.,
12: 058.,
13: 050.,
14: 045.,
15: 040.,
16: 045.,
17: 050.,
18: 058.,
19: 065.,
20: 073.,
21: 080.,
22: 073.,
23: 065.,
24: 050.,
}
hourly_equinox_demand_scalar = {
1: 075.,
2: 070.,
3: 065.,
4: 073.,
5: 080.,
6: 094.,
7: 108.,
8: 124.,
9: 140.,
10: 142.,
11: 133.,
12: 117.,
13: 100.,
14: 096.,
15: 093.,
16: 108.,
17: 125.,
18: 142.,
19: 158.,
20: 168.,
21: 178.,
22: 168.,
23: 158.,
24: 075.,
}
avg_summer = 0.
avg_winter = 0.
avg_spring = 0.
avg_fall = 0
for item in hourly_summer_demand_scalar:
avg_summer += 0.01 * hourly_summer_demand_scalar[item] / len(hourly_summer_demand_scalar)
for item in hourly_winter_demand_scalar:
avg_winter += 0.01 * hourly_winter_demand_scalar[item] / len(hourly_winter_demand_scalar)
for item in hourly_equinox_demand_scalar:
avg_spring += 0.007225 * hourly_equinox_demand_scalar[item] / len(hourly_equinox_demand_scalar)
for item in hourly_equinox_demand_scalar:
avg_fall += 0.007225 * hourly_equinox_demand_scalar[item] / len(hourly_equinox_demand_scalar)
avg_annual = (avg_summer + avg_winter + avg_spring + avg_fall) / 4
print('average summer:', avg_summer)
print('average winter:', avg_winter)
print('average spring:', avg_spring)
print('average fall:', avg_fall)
print('average annual:', avg_annual)
print('')
avg_monthly = 0.
for item in monthly_annual_demand_scalar:
avg_monthly += 0.01 * monthly_annual_demand_scalar[item] / len(monthly_annual_demand_scalar)
print('average monthly:', avg_monthly)
print('')
avg_hourly = 0.
num_hourly = 1./ (12 * 24)
for item in monthly_seasonal_demand_scalar:
for hour in range(1, 24+1):
if item == 6 or item == 7 or item == 8:
avg_hourly += monthly_seasonal_demand_scalar[item] * hourly_summer_demand_scalar[hour] * 0.01 * num_hourly
if item == 12 or item == 1 or item == 2:
avg_hourly += monthly_seasonal_demand_scalar[item] * hourly_winter_demand_scalar[hour] * 0.01 * num_hourly
if item == 3 or item == 4 or item == 5:
avg_hourly += monthly_seasonal_demand_scalar[item] * hourly_equinox_demand_scalar[hour] * 0.007225 * num_hourly
if item == 9 or item == 10 or item == 11:
avg_hourly += monthly_seasonal_demand_scalar[item] * hourly_equinox_demand_scalar[hour] * 0.007225 * num_hourly
print('average hourly:', avg_hourly)
|
apache-2.0
|
akrherz/idep
|
scripts/tillage_timing/dump_mar10_tab1.py
|
2
|
1284
|
"""Dump for tab1 of requested 10 march 2020 spreadsheet.
"""
import datetime
import glob
from pyiem.dep import read_env
from pyiem.util import logger
import pandas as pd
LOG = logger()
def do_scenario(scenario, plantdate, hucs):
"""Process this scenario."""
envdfs = []
for huc12 in hucs:
for fn in glob.glob(
"/i/%s/env/%s/%s/*" % (scenario, huc12[:8], huc12[8:])
):
df = read_env(fn)
if df.empty:
continue
envdfs.append(df.groupby(df["date"].dt.year).sum().copy())
envdf = pd.concat(envdfs).reset_index()
res = envdf.groupby("date").mean()
LOG.info(plantdate)
for year in range(2008, 2019):
print(
"%s,%.2f,%.2f,M,%.2f"
% (
year,
res.at[year, "precip"],
res.at[year, "runoff"],
res.at[year, "av_det"],
)
)
print()
def main():
"""Go Main Go."""
apr10 = datetime.date(2000, 4, 10)
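# Scenarios 59..69 correspond to planting dates Apr 10, Apr 15, ..., May 30
# (5-day steps), via the timedelta below.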
hucs = [x.strip() for x in open("myhucs.txt").readlines()]
for scenario in range(59, 70):
plantdate = apr10 + datetime.timedelta(days=(scenario - 59) * 5)
do_scenario(scenario, plantdate, hucs)
if __name__ == "__main__":
main()
|
mit
|
humdings/zipline
|
zipline/pipeline/engine.py
|
1
|
22892
|
"""
Compute Engine definitions for the Pipeline API.
"""
from abc import (
ABCMeta,
abstractmethod,
)
from uuid import uuid4
from six import (
iteritems,
with_metaclass,
)
from numpy import array
from pandas import DataFrame, MultiIndex
from toolz import groupby, juxt
from toolz.curried.operator import getitem
from zipline.lib.adjusted_array import ensure_adjusted_array, ensure_ndarray
from zipline.errors import NoFurtherDataError
from zipline.utils.numpy_utils import (
as_column,
repeat_first_axis,
repeat_last_axis,
)
from zipline.utils.pandas_utils import explode
from .term import AssetExists, InputDates, LoadableTerm
from zipline.utils.date_utils import compute_date_range_chunks
from zipline.utils.pandas_utils import categorical_df_concat
from zipline.utils.sharedoc import copydoc
class PipelineEngine(with_metaclass(ABCMeta)):
@abstractmethod
def run_pipeline(self, pipeline, start_date, end_date):
"""
Compute values for ``pipeline`` between ``start_date`` and
``end_date``.
Returns a DataFrame with a MultiIndex of (date, asset) pairs.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
"""
raise NotImplementedError("run_pipeline")
@abstractmethod
def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize):
"""
Compute values for `pipeline` in number of days equal to `chunksize`
and return stitched up result. Computing in chunks is useful for
pipelines computed over a long period of time.
Parameters
----------
pipeline : Pipeline
The pipeline to run.
start_date : pd.Timestamp
The start date to run the pipeline for.
end_date : pd.Timestamp
The end date to run the pipeline for.
chunksize : int
The number of days to execute at a time.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
"""
raise NotImplementedError("run_chunked_pipeline")
class NoEngineRegistered(Exception):
"""
Raised if a user tries to call pipeline_output in an algorithm that hasn't
set up a pipeline engine.
"""
class ExplodingPipelineEngine(PipelineEngine):
"""
A PipelineEngine that doesn't do anything.
"""
def run_pipeline(self, pipeline, start_date, end_date):
raise NoEngineRegistered(
"Attempted to run a pipeline but no pipeline "
"resources were registered."
)
def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize):
raise NoEngineRegistered(
"Attempted to run a chunked pipeline but no pipeline "
"resources were registered."
)
def default_populate_initial_workspace(initial_workspace,
root_mask_term,
execution_plan,
dates,
assets):
"""The default implementation for ``populate_initial_workspace``. This
function returns the ``initial_workspace`` argument without making any
modifications.
Parameters
----------
initial_workspace : dict[array-like]
The initial workspace before we have populated it with any cached
terms.
root_mask_term : Term
The root mask term, normally ``AssetExists()``. This is needed to
compute the dates for individual terms.
execution_plan : ExecutionPlan
The execution plan for the pipeline being run.
dates : pd.DatetimeIndex
All of the dates being requested in this pipeline run including
the extra dates for look back windows.
assets : pd.Int64Index
All of the assets that exist for the window being computed.
Returns
-------
populated_initial_workspace : dict[term, array-like]
The workspace to begin computations with.
"""
return initial_workspace
class SimplePipelineEngine(PipelineEngine):
"""
PipelineEngine class that computes each term independently.
Parameters
----------
get_loader : callable
A function that is given a loadable term and returns a PipelineLoader
to use to retrieve raw data for that term.
calendar : DatetimeIndex
Array of dates to consider as trading days when computing a range
between a fixed start and end.
asset_finder : zipline.assets.AssetFinder
An AssetFinder instance. We depend on the AssetFinder to determine
which assets are in the top-level universe at any point in time.
populate_initial_workspace : callable, optional
A function which will be used to populate the initial workspace when
computing a pipeline. See
:func:`zipline.pipeline.engine.default_populate_initial_workspace`
for more info.
See Also
--------
:func:`zipline.pipeline.engine.default_populate_initial_workspace`
"""
__slots__ = (
'_get_loader',
'_calendar',
'_finder',
'_root_mask_term',
'_root_mask_dates_term',
'_populate_initial_workspace',
)
def __init__(self,
get_loader,
calendar,
asset_finder,
populate_initial_workspace=None):
self._get_loader = get_loader
self._calendar = calendar
self._finder = asset_finder
self._root_mask_term = AssetExists()
self._root_mask_dates_term = InputDates()
self._populate_initial_workspace = (
populate_initial_workspace or default_populate_initial_workspace
)
def run_pipeline(self, pipeline, start_date, end_date):
"""
Compute a pipeline.
The algorithm implemented here can be broken down into the following
stages:
0. Build a dependency graph of all terms in `pipeline`. Topologically
sort the graph to determine an order in which we can compute the
terms.
1. Ask our AssetFinder for a "lifetimes matrix", which should contain,
for each date between start_date and end_date, a boolean value for
each known asset indicating whether the asset existed on that date.
2. Compute each term in the dependency order determined in (0), caching
the results in a dictionary so that they can be fed into future
terms.
3. For each date, determine the number of assets passing
pipeline.screen. The sum, N, of all these values is the total
number of rows in our output frame, so we pre-allocate an output
array of length N for each factor in `terms`.
4. Fill in the arrays allocated in (3) by copying computed values from
our output cache into the corresponding rows.
5. Stick the values computed in (4) into a DataFrame and return it.
Step 0 is performed by ``Pipeline.to_graph``.
Step 1 is performed in ``SimplePipelineEngine._compute_root_mask``.
Step 2 is performed in ``SimplePipelineEngine.compute_chunk``.
Steps 3, 4, and 5 are performed in ``SimplePipelineEngine._to_narrow``.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
:meth:`zipline.pipeline.engine.PipelineEngine.run_chunked_pipeline`
"""
if end_date < start_date:
raise ValueError(
"start_date must be before or equal to end_date \n"
"start_date=%s, end_date=%s" % (start_date, end_date)
)
screen_name = uuid4().hex
graph = pipeline.to_execution_plan(
screen_name,
self._root_mask_term,
self._calendar,
start_date,
end_date,
)
extra_rows = graph.extra_rows[self._root_mask_term]
root_mask = self._compute_root_mask(start_date, end_date, extra_rows)
dates, assets, root_mask_values = explode(root_mask)
initial_workspace = self._populate_initial_workspace(
{
self._root_mask_term: root_mask_values,
self._root_mask_dates_term: as_column(dates.values)
},
self._root_mask_term,
graph,
dates,
assets,
)
results = self.compute_chunk(
graph,
dates,
assets,
initial_workspace,
)
return self._to_narrow(
graph.outputs,
results,
results.pop(screen_name),
dates[extra_rows:],
assets,
)
@copydoc(PipelineEngine.run_chunked_pipeline)
def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize):
ranges = compute_date_range_chunks(
self._calendar,
start_date,
end_date,
chunksize,
)
chunks = [self.run_pipeline(pipeline, s, e) for s, e in ranges]
if len(chunks) == 1:
# OPTIMIZATION: Don't make an extra copy in `categorical_df_concat`
# if we don't have to.
return chunks[0]
return categorical_df_concat(chunks, inplace=True)
def _compute_root_mask(self, start_date, end_date, extra_rows):
"""
Compute a lifetimes matrix from our AssetFinder, then drop columns that
didn't exist at all during the query dates.
Parameters
----------
start_date : pd.Timestamp
Base start date for the matrix.
end_date : pd.Timestamp
End date for the matrix.
extra_rows : int
Number of extra rows to compute before `start_date`.
Extra rows are needed by terms like moving averages that require a
trailing window of data.
Returns
-------
lifetimes : pd.DataFrame
Frame of dtype `bool` containing dates from `extra_rows` days
before `start_date`, continuing through to `end_date`. The
returned frame contains as columns all assets in our AssetFinder
that existed for at least one day between `start_date` and
`end_date`.
"""
calendar = self._calendar
finder = self._finder
start_idx, end_idx = self._calendar.slice_locs(start_date, end_date)
if start_idx < extra_rows:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=calendar[0],
lookback_start=start_date,
lookback_length=extra_rows,
)
# Build lifetimes matrix reaching back to `extra_rows` days before
# `start_date.`
lifetimes = finder.lifetimes(
calendar[start_idx - extra_rows:end_idx],
include_start_date=False
)
assert lifetimes.index[extra_rows] == start_date
assert lifetimes.index[-1] == end_date
if not lifetimes.columns.is_unique:
columns = lifetimes.columns
duplicated = columns[columns.duplicated()].unique()
raise AssertionError("Duplicated sids: %s" % duplicated)
# Filter out columns that didn't exist from the farthest look back
# window through the end of the requested dates.
existed = lifetimes.any()
ret = lifetimes.loc[:, existed]
shape = ret.shape
assert shape[0] * shape[1] != 0, 'root mask cannot be empty'
return ret
@staticmethod
def _inputs_for_term(term, workspace, graph):
"""
Compute inputs for the given term.
This is mostly complicated by the fact that for each input we store as
many rows as will be necessary to serve **any** computation requiring
that input.
"""
offsets = graph.offset
out = []
if term.windowed:
# If term is windowed, then all input data should be instances of
# AdjustedArray.
for input_ in term.inputs:
adjusted_array = ensure_adjusted_array(
workspace[input_], input_.missing_value,
)
out.append(
adjusted_array.traverse(
window_length=term.window_length,
offset=offsets[term, input_],
)
)
else:
# If term is not windowed, input_data may be an AdjustedArray or
# np.ndarray. Coerce the former to the latter.
for input_ in term.inputs:
input_data = ensure_ndarray(workspace[input_])
offset = offsets[term, input_]
# OPTIMIZATION: Don't make a copy by doing input_data[0:] if
# offset is zero.
if offset:
input_data = input_data[offset:]
out.append(input_data)
return out
def get_loader(self, term):
return self._get_loader(term)
def compute_chunk(self, graph, dates, assets, initial_workspace):
"""
Compute the Pipeline terms in the graph for the requested start and end
dates.
Parameters
----------
graph : zipline.pipeline.graph.TermGraph
dates : pd.DatetimeIndex
Row labels for our root mask.
assets : pd.Int64Index
Column labels for our root mask.
initial_workspace : dict
Map from term -> output.
Must contain at least entry for `self._root_mask_term` whose shape
is `(len(dates), len(assets))`, but may contain additional
pre-computed terms for testing or optimization purposes.
Returns
-------
results : dict
Dictionary mapping requested results to outputs.
"""
self._validate_compute_chunk_params(dates, assets, initial_workspace)
get_loader = self.get_loader
# Copy the supplied initial workspace so we don't mutate it in place.
workspace = initial_workspace.copy()
# If loadable terms share the same loader and extra_rows, load them all
# together.
loader_group_key = juxt(get_loader, getitem(graph.extra_rows))
loader_groups = groupby(loader_group_key, graph.loadable_terms)
refcounts = graph.initial_refcounts(workspace)
for term in graph.execution_order(refcounts):
# `term` may have been supplied in `initial_workspace`, and in the
# future we may pre-compute loadable terms coming from the same
# dataset. In either case, we will already have an entry for this
# term, which we shouldn't re-compute.
if term in workspace:
continue
# Asset labels are always the same, but date labels vary by how
# many extra rows are needed.
mask, mask_dates = graph.mask_and_dates_for_term(
term,
self._root_mask_term,
workspace,
dates,
)
if isinstance(term, LoadableTerm):
to_load = sorted(
loader_groups[loader_group_key(term)],
key=lambda t: t.dataset
)
loader = get_loader(term)
loaded = loader.load_adjusted_array(
to_load, mask_dates, assets, mask,
)
assert set(loaded) == set(to_load), (
'loader did not return an AdjustedArray for each column\n'
'expected: %r\n'
'got: %r' % (sorted(to_load), sorted(loaded))
)
workspace.update(loaded)
else:
workspace[term] = term._compute(
self._inputs_for_term(term, workspace, graph),
mask_dates,
assets,
mask,
)
if term.ndim == 2:
assert workspace[term].shape == mask.shape
else:
assert workspace[term].shape == (mask.shape[0], 1)
# Decref dependencies of ``term``, and clear any terms whose
# refcounts hit 0.
for garbage_term in graph.decref_dependencies(term, refcounts):
del workspace[garbage_term]
out = {}
graph_extra_rows = graph.extra_rows
for name, term in iteritems(graph.outputs):
# Truncate off extra rows from outputs.
out[name] = workspace[term][graph_extra_rows[term]:]
return out
def _to_narrow(self, terms, data, mask, dates, assets):
"""
Convert raw computed pipeline results into a DataFrame for public APIs.
Parameters
----------
terms : dict[str -> Term]
Dict mapping column names to terms.
data : dict[str -> ndarray[ndim=2]]
Dict mapping column names to computed results for those names.
mask : ndarray[bool, ndim=2]
Mask array of values to keep.
dates : ndarray[datetime64, ndim=1]
Row index for arrays `data` and `mask`
assets : ndarray[int64, ndim=2]
Column index for arrays `data` and `mask`
Returns
-------
results : pd.DataFrame
The indices of `results` are as follows:
index : two-tiered MultiIndex of (date, asset).
Contains an entry for each (date, asset) pair corresponding to
a `True` value in `mask`.
columns : Index of str
One column per entry in `data`.
If mask[date, asset] is True, then result.loc[(date, asset), colname]
will contain the value of data[colname][date, asset].
"""
if not mask.any():
# Manually handle the empty DataFrame case. This is a workaround
# to pandas failing to tz_localize an empty dataframe with a
# MultiIndex. It also saves us the work of applying a known-empty
# mask to each array.
#
# Slicing `dates` here to preserve pandas metadata.
empty_dates = dates[:0]
empty_assets = array([], dtype=object)
return DataFrame(
data={
name: array([], dtype=arr.dtype)
for name, arr in iteritems(data)
},
index=MultiIndex.from_arrays([empty_dates, empty_assets]),
)
resolved_assets = array(self._finder.retrieve_all(assets))
dates_kept = repeat_last_axis(dates.values, len(assets))[mask]
assets_kept = repeat_first_axis(resolved_assets, len(dates))[mask]
final_columns = {}
for name in data:
# Each term that computed an output has its postprocess method
# called on the filtered result.
#
# As of Mon May 2 15:38:47 2016, we only use this to convert
# LabelArrays into categoricals.
final_columns[name] = terms[name].postprocess(data[name][mask])
return DataFrame(
data=final_columns,
index=MultiIndex.from_arrays([dates_kept, assets_kept]),
).tz_localize('UTC', level=0)
def _validate_compute_chunk_params(self, dates, assets, initial_workspace):
"""
Verify that the values passed to compute_chunk are well-formed.
"""
root = self._root_mask_term
clsname = type(self).__name__
# Writing this out explicitly so this errors in testing if we change
# the name without updating this line.
compute_chunk_name = self.compute_chunk.__name__
if root not in initial_workspace:
raise AssertionError(
"root_mask values not supplied to {cls}.{method}".format(
cls=clsname,
method=compute_chunk_name,
)
)
shape = initial_workspace[root].shape
implied_shape = len(dates), len(assets)
if shape != implied_shape:
raise AssertionError(
"root_mask shape is {shape}, but received dates/assets "
"imply that shape should be {implied}".format(
shape=shape,
implied=implied_shape,
)
)
|
apache-2.0
|
kmike/scikit-learn
|
examples/svm/plot_svm_regression.py
|
8
|
1431
|
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF
kernels.
"""
print(__doc__)
###############################################################################
# Generate sample data
import numpy as np
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
from sklearn.svm import SVR
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
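# Note: all three models are fit and evaluated on the same 40 training points,
# so the curves below are in-sample fits; C sets the regularization strength and
# gamma/degree control the RBF width and polynomial order respectively.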
###############################################################################
# look at the results
import pylab as pl
pl.scatter(X, y, c='k', label='data')
pl.hold('on')
pl.plot(X, y_rbf, c='g', label='RBF model')
pl.plot(X, y_lin, c='r', label='Linear model')
pl.plot(X, y_poly, c='b', label='Polynomial model')
pl.xlabel('data')
pl.ylabel('target')
pl.title('Support Vector Regression')
pl.legend()
pl.show()
|
bsd-3-clause
|
mrklees/cy-automation-library
|
cyautomation/cyschoolhouse/ToT_audit.py
|
2
|
5757
|
import os
import pandas as pd
from StyleFrame import StyleFrame, Styler, utils
from .config import get_sch_ref_df
from . import simple_cysh as cysh
sch_ref_df = get_sch_ref_df()
def fix_T1T2ELT(sf=cysh.sf):
""" Standardize common spellings of "T1" "T2" and "ELT"
"""
typo_map = {
r'([Tt]([Ii][Ee]|[Ee][Ii])[Rr] ?|t)(1|[Oo]ne)':'T1',
r'([Tt]([Ii][Ee]|[Ee][Ii])[Rr] ?|t)(2|[Tt]wo)':'T2',
#r'([Aa]fter ?[Ss]chool|ASP)':'ELT',
}
all_typos = '|'.join(list(typo_map.keys()))
df = cysh.get_object_df('Intervention_Session__c', ['Id', 'Comments__c'], rename_id=True)
df['Comments__c'].fillna('', inplace=True)
df = df.loc[df['Comments__c'].str.contains(all_typos)]
df['Comments__c'].replace(typo_map, regex=True, inplace=True)
print(f"Found {len(df)} T1, T2, or ELT labels that can be fixed")
results = []
for index, row in df.iterrows():
result = sf.Intervention_Session__c.update(row['Intervention_Session__c'], {'Comments__c':row['Comments__c']})
results.append(result)
return results
def get_error_table():
ISR_df = cysh.get_object_df('Intervention_Session_Result__c', ['Amount_of_Time__c', 'IsDeleted', 'Intervention_Session_Date__c', 'Related_Student_s_Name__c', 'Intervention_Session__c', 'CreatedDate'])
IS_df = cysh.get_object_df('Intervention_Session__c', ['Id', 'Name', 'Comments__c', 'Section__c'], rename_id=True, rename_name=True)
section_df = cysh.get_object_df('Section__c', ['Id', 'School__c', 'Intervention_Primary_Staff__c', 'Program__c'], rename_id=True)
school_df = cysh.get_object_df('Account', ['Id', 'Name'])
school_df.rename(columns={'Id':'School__c', 'Name':'School_Name__c'}, inplace=True)
staff_df = cysh.get_object_df('Staff__c', ['Id', 'Name'], where="Site__c = 'Chicago'", rename_id=True, rename_name=True)
program_df = cysh.get_object_df('Program__c', ['Id', 'Name'], rename_id=True, rename_name=True)
df = ISR_df.merge(IS_df, how='left', on='Intervention_Session__c'); del df['Intervention_Session__c']
df = df.merge(section_df, how='left', on='Section__c'); del df['Section__c']
df = df.merge(school_df, how='left', on='School__c'); del df['School__c']
df = df.merge(staff_df, how='left', left_on='Intervention_Primary_Staff__c', right_on='Staff__c'); del df['Intervention_Primary_Staff__c'], df['Staff__c']
df = df.merge(program_df, how='left', on='Program__c'); del df['Program__c']
df['Intervention_Session_Date__c'] = pd.to_datetime(df['Intervention_Session_Date__c']).dt.date
df['CreatedDate'] = pd.to_datetime(df['CreatedDate']).dt.date
df['Comments__c'].fillna('', inplace=True)
df.loc[df['Program__c_Name'].str.contains('Tutoring')
& ~df['Comments__c'].str.contains('T1|T2'), 'Missing T1/T2 Code'] = 'Missing T1/T2 Code'
df.loc[df['Program__c_Name'].str.contains('Tutoring')
& df['Comments__c'].str.contains('T1')
& df['Comments__c'].str.contains('T2'), 'Listed T1 and T2'] = 'Listed T1 and T2'
df.loc[df['Program__c_Name'].str.contains('Tutoring|SEL')
& (df['Amount_of_Time__c'] < 10), '<10 Minutes'] = '<10 Minutes'
df.loc[df['Program__c_Name'].str.contains('Tutoring')
& (df['Amount_of_Time__c'] > 120), '>120 Minutes'] = '>120 Minutes'
df.loc[df['Intervention_Session_Date__c'] > df['CreatedDate'], 'Logged in Future'] = 'Logged in Future'
df.loc[df['Program__c_Name'].isin(['DESSA', 'Math Inventory', 'Reading Inventory']), 'Wrong Section'] = 'Wrong Section'
error_cols = ['Missing T1/T2 Code', 'Listed T1 and T2', '<10 Minutes', '>120 Minutes', 'Logged in Future', 'Wrong Section']
df['Error'] = df[error_cols].apply(lambda x: x.str.cat(sep=' & '), axis=1)
accepted_errors_df = pd.read_excel((
"Z:\\ChiPrivate\\Chicago Data and Evaluation\\SY19\\"
"SY19 ToT Audit Accepted Errors.xlsx"
))
df = df.loc[
(df['Error'] != '') &
~df['Intervention_Session__c_Name'].isin(accepted_errors_df['SESSION_ID'])
]
col_friendly_names = {
'School_Name__c':'School',
'Staff__c_Name':'ACM',
'Program__c_Name':'Program',
'Intervention_Session__c_Name':'Session ID',
'Related_Student_s_Name__c':'Student',
'CreatedDate':'Submission Date',
'Intervention_Session_Date__c':'Session Date',
'Amount_of_Time__c':'ToT',
#'Comments__c':'Comment',
'Error':'Error',
}
df = df.rename(columns=col_friendly_names)
df = df.sort_values(list(col_friendly_names.values()))
df = df[list(col_friendly_names.values())]
return df
def write_error_tables_to_cyconnect(df):
for index, row in sch_ref_df.iterrows():
school_error_df = df.loc[df['School'] == row['School']].copy()
del school_error_df['School']
write_path = f"Z:\\{row['Informal Name']} Team Documents\\SY19 ToT Audit Errors - {row['Informal Name']}.xlsx"
if os.path.exists(write_path):
os.remove(write_path)
excel_writer = StyleFrame.ExcelWriter(write_path)
sfr = StyleFrame(school_error_df)
sfr.apply_column_style(
cols_to_style=list(school_error_df),
styler_obj=Styler(
horizontal_alignment=utils.horizontal_alignments.left,
#vertical_alignment=utils.vertical_alignments.top
),
width=25,
style_header=False,
)
if len(school_error_df) > 0:
freeze = 'A2'
else:
freeze = 'A1'
sfr.to_excel(
excel_writer=excel_writer,
row_to_add_filters=0,
columns_and_rows_to_freeze=freeze,
)
excel_writer.save()
|
gpl-3.0
|
morrisonwudi/zipline
|
zipline/finance/performance/tracker.py
|
14
|
23349
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Performance Tracking
====================
+-----------------+----------------------------------------------------+
| key | value |
+=================+====================================================+
| period_start | The beginning of the period to be tracked. datetime|
| | in pytz.utc timezone. Will always be 0:00 on the |
| | date in UTC. The fact that the time may be on the |
| | prior day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| period_end | The end of the period to be tracked. datetime |
| | in pytz.utc timezone. Will always be 23:59 on the |
| | date in UTC. The fact that the time may be on the |
| | next day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| progress | percentage of test completed |
+-----------------+----------------------------------------------------+
| capital_base | The initial capital assumed for this tracker. |
+-----------------+----------------------------------------------------+
| cumulative_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
+-----------------+----------------------------------------------------+
| todays_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker with datetime stamps between last_open|
| | and last_close. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
| | TODO: adding this because we calculate it. May be |
| | overkill. |
+-----------------+----------------------------------------------------+
| cumulative_risk | A dictionary representing the risk metrics |
| _metrics | calculated based on the positions aggregated |
| | through all the events delivered to this tracker. |
| | For details look at the comments for |
| | :py:meth:`zipline.finance.risk.RiskMetrics.to_dict`|
+-----------------+----------------------------------------------------+
"""
from __future__ import division
import logbook
import pickle
from six import iteritems
from datetime import datetime
import numpy as np
import pandas as pd
from pandas.tseries.tools import normalize_date
import zipline.finance.risk as risk
from zipline.finance.trading import TradingEnvironment
from . period import PerformancePeriod
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
from . position_tracker import PositionTracker
log = logbook.Logger('Performance')
class PerformanceTracker(object):
"""
Tracks the performance of the algorithm.
"""
def __init__(self, sim_params):
self.sim_params = sim_params
env = TradingEnvironment.instance()
self.period_start = self.sim_params.period_start
self.period_end = self.sim_params.period_end
self.last_close = self.sim_params.last_close
first_open = self.sim_params.first_open.tz_convert(env.exchange_tz)
self.day = pd.Timestamp(datetime(first_open.year, first_open.month,
first_open.day), tz='UTC')
self.market_open, self.market_close = env.get_open_and_close(self.day)
self.total_days = self.sim_params.days_in_period
self.capital_base = self.sim_params.capital_base
self.emission_rate = sim_params.emission_rate
all_trading_days = env.trading_days
mask = ((all_trading_days >= normalize_date(self.period_start)) &
(all_trading_days <= normalize_date(self.period_end)))
self.trading_days = all_trading_days[mask]
self.dividend_frame = pd.DataFrame()
self._dividend_count = 0
self.position_tracker = PositionTracker()
self.perf_periods = []
if self.emission_rate == 'daily':
self.all_benchmark_returns = pd.Series(
index=self.trading_days)
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(self.sim_params)
elif self.emission_rate == 'minute':
self.all_benchmark_returns = pd.Series(index=pd.date_range(
self.sim_params.first_open, self.sim_params.last_close,
freq='Min'))
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(self.sim_params,
create_first_day_stats=True)
self.minute_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the cumulative period will be calculated over the
# entire test.
self.period_start,
self.period_end,
# don't save the transactions for the cumulative
# period
keep_transactions=False,
keep_orders=False,
# don't serialize positions for cumulative period
serialize_positions=False
)
self.minute_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.minute_performance)
# this performance period will span the entire simulation from
# inception.
self.cumulative_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the cumulative period will be calculated over the entire test.
self.period_start,
self.period_end,
# don't save the transactions for the cumulative
# period
keep_transactions=False,
keep_orders=False,
# don't serialize positions for cumulative period
serialize_positions=False,
)
self.cumulative_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.cumulative_performance)
# this performance period will span just the current market day
self.todays_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the daily period will be calculated for the market day
self.market_open,
self.market_close,
keep_transactions=True,
keep_orders=True,
serialize_positions=True,
)
self.todays_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.todays_performance)
self.saved_dt = self.period_start
# one indexed so that we reach 100%
self.day_count = 0.0
self.txn_count = 0
self.account_needs_update = True
self._account = None
def __repr__(self):
return "%s(%r)" % (
self.__class__.__name__,
{'simulation parameters': self.sim_params})
@property
def progress(self):
if self.emission_rate == 'minute':
# Fake a value
return 1.0
elif self.emission_rate == 'daily':
return self.day_count / self.total_days
def set_date(self, date):
if self.emission_rate == 'minute':
self.saved_dt = date
self.todays_performance.period_close = self.saved_dt
def update_dividends(self, new_dividends):
"""
Update our dividend frame with new dividends. @new_dividends should be
a DataFrame with columns containing at least the entries in
zipline.protocol.DIVIDEND_FIELDS.
"""
# Mark each new dividend with a unique integer id. This ensures that
# we can differentiate dividends whose date/sid fields are otherwise
# identical.
new_dividends['id'] = np.arange(
self._dividend_count,
self._dividend_count + len(new_dividends),
)
self._dividend_count += len(new_dividends)
self.dividend_frame = pd.concat(
[self.dividend_frame, new_dividends]
).sort(['pay_date', 'ex_date']).set_index('id', drop=False)
def initialize_dividends_from_other(self, other):
"""
Helper for copying dividends to a new PerformanceTracker while
preserving dividend count. Useful if a simulation needs to create a
new PerformanceTracker mid-stream and wants to preserve stored dividend
info.
Note that this does not copy unpaid dividends.
"""
self.dividend_frame = other.dividend_frame
self._dividend_count = other._dividend_count
def handle_sid_removed_from_universe(self, sid):
"""
This method handles any behaviors that must occur when a SID leaves the
universe of the TradingAlgorithm.
Parameters
----------
sid : int
The sid of the Asset being removed from the universe.
"""
# Drop any dividends for the sid from the dividends frame
self.dividend_frame = self.dividend_frame[
self.dividend_frame.sid != sid
]
def update_performance(self):
# calculate performance as of last trade
for perf_period in self.perf_periods:
perf_period.calculate_performance()
def get_portfolio(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
return self.cumulative_performance.as_portfolio()
def get_account(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
if self.account_needs_update:
self._update_account()
return self._account
def _update_account(self):
self._account = self.cumulative_performance.as_account()
self.account_needs_update = False
def to_dict(self, emission_type=None):
"""
Creates a dictionary representing the state of this tracker.
Returns a dict object of the form described in header comments.
"""
# Default to the emission rate of this tracker if no type is provided
if emission_type is None:
emission_type = self.emission_rate
_dict = {
'period_start': self.period_start,
'period_end': self.period_end,
'capital_base': self.capital_base,
'cumulative_perf': self.cumulative_performance.to_dict(),
'progress': self.progress,
'cumulative_risk_metrics': self.cumulative_risk_metrics.to_dict()
}
if emission_type == 'daily':
_dict['daily_perf'] = self.todays_performance.to_dict()
elif emission_type == 'minute':
_dict['minute_perf'] = self.todays_performance.to_dict(
self.saved_dt)
else:
raise ValueError("Invalid emission type: %s" % emission_type)
return _dict
def process_trade(self, event):
# update last sale, and pay out a cash adjustment
cash_adjustment = self.position_tracker.update_last_sale(event)
if cash_adjustment != 0:
for perf_period in self.perf_periods:
perf_period.handle_cash_payment(cash_adjustment)
def process_transaction(self, event):
self.txn_count += 1
self.position_tracker.execute_transaction(event)
for perf_period in self.perf_periods:
perf_period.handle_execution(event)
def process_dividend(self, dividend):
log.info("Ignoring DIVIDEND event.")
def process_split(self, event):
leftover_cash = self.position_tracker.handle_split(event)
if leftover_cash > 0:
for perf_period in self.perf_periods:
perf_period.handle_cash_payment(leftover_cash)
def process_order(self, event):
for perf_period in self.perf_periods:
perf_period.record_order(event)
def process_commission(self, event):
self.position_tracker.handle_commission(event)
for perf_period in self.perf_periods:
perf_period.handle_commission(event)
def process_benchmark(self, event):
if self.sim_params.data_frequency == 'minute' and \
self.sim_params.emission_rate == 'daily':
# Minute data benchmarks should have a timestamp of market
# close, so that calculations are triggered at the right time.
# However, risk module uses midnight as the 'day'
# marker for returns, so adjust back to midnight.
midnight = pd.tseries.tools.normalize_date(event.dt)
else:
midnight = event.dt
if midnight not in self.all_benchmark_returns.index:
raise AssertionError(
("Date %s not allocated in all_benchmark_returns. "
"Calendar seems to mismatch with benchmark. "
"Benchmark container is=%s" %
(midnight,
self.all_benchmark_returns.index)))
self.all_benchmark_returns[midnight] = event.returns
def process_close_position(self, event):
# CLOSE_POSITION events that contain prices that must be handled as
# a final trade event
if 'price' in event:
self.process_trade(event)
txn = self.position_tracker.\
maybe_create_close_position_transaction(event)
if txn:
self.process_transaction(txn)
def check_upcoming_dividends(self, next_trading_day):
"""
Check if we currently own any stocks with dividends whose ex_date is
the next trading day. Track how much we should be paid on those
dividends' pay dates.
Then check if we are owed cash/stock for any dividends whose pay date
is the next trading day. Apply all such benefits, then recalculate
performance.
"""
if len(self.dividend_frame) == 0:
# We don't currently know about any dividends for this simulation
# period, so bail.
return
# Dividends whose ex_date is the next trading day. We need to check if
# we own any of these stocks so we know to pay them out when the pay
# date comes.
ex_date_mask = (self.dividend_frame['ex_date'] == next_trading_day)
dividends_earnable = self.dividend_frame[ex_date_mask]
# Dividends whose pay date is the next trading day. If we held any of
# these stocks on midnight before the ex_date, we need to pay these out
# now.
pay_date_mask = (self.dividend_frame['pay_date'] == next_trading_day)
dividends_payable = self.dividend_frame[pay_date_mask]
position_tracker = self.position_tracker
if len(dividends_earnable):
position_tracker.earn_dividends(dividends_earnable)
if not len(dividends_payable):
return
net_cash_payment = position_tracker.pay_dividends(dividends_payable)
for period in self.perf_periods:
# notify periods to update their stats
period.handle_dividends_paid(net_cash_payment)
def check_asset_auto_closes(self, next_trading_day):
"""
Check if the position tracker currently owns any Assets with an
auto-close date that is the next trading day. Close those positions.
Parameters
----------
next_trading_day : pandas.Timestamp
The next trading day of the simulation
"""
auto_close_events = self.position_tracker.auto_close_position_events(
next_trading_day=next_trading_day
)
for event in auto_close_events:
self.process_close_position(event)
def handle_minute_close(self, dt):
"""
Handles the close of the given minute. This includes handling
market-close functions if the given minute is the end of the market
day.
Parameters
        ----------
dt : Timestamp
The minute that is ending
Returns
        -------
(dict, dict/None)
A tuple of the minute perf packet and daily perf packet.
If the market day has not ended, the daily perf packet is None.
"""
self.update_performance()
todays_date = normalize_date(dt)
account = self.get_account(False)
self.minute_performance.rollover()
bench_returns = self.all_benchmark_returns.loc[todays_date:dt]
# cumulative returns
bench_since_open = (1. + bench_returns).prod() - 1
self.cumulative_risk_metrics.update(todays_date,
self.todays_performance.returns,
bench_since_open,
account)
minute_packet = self.to_dict(emission_type='minute')
# if this is the close, update dividends for the next day.
# Return the performance tuple
if dt == self.market_close:
return (minute_packet, self._handle_market_close(todays_date))
else:
return (minute_packet, None)
def handle_market_close_daily(self):
"""
Function called after handle_data when running with daily emission
rate.
"""
self.update_performance()
completed_date = self.day
account = self.get_account(False)
# update risk metrics for cumulative performance
self.cumulative_risk_metrics.update(
completed_date,
self.todays_performance.returns,
self.all_benchmark_returns[completed_date],
account)
return self._handle_market_close(completed_date)
def _handle_market_close(self, completed_date):
# increment the day counter before we move markers forward.
self.day_count += 1.0
# Get the next trading day and, if it is past the bounds of this
# simulation, return the daily perf packet
next_trading_day = TradingEnvironment.instance().\
next_trading_day(completed_date)
# Check if any assets need to be auto-closed before generating today's
# perf period
if next_trading_day:
self.check_asset_auto_closes(next_trading_day=next_trading_day)
# Take a snapshot of our current performance to return to the
# browser.
daily_update = self.to_dict(emission_type='daily')
# On the last day of the test, don't create tomorrow's performance
# period. We may not be able to find the next trading day if we're at
# the end of our historical data
if self.market_close >= self.last_close:
return daily_update
# move the market day markers forward
env = TradingEnvironment.instance()
self.market_open, self.market_close = \
env.next_open_and_close(self.day)
self.day = env.next_trading_day(self.day)
# Roll over positions to current day.
self.todays_performance.rollover()
self.todays_performance.period_open = self.market_open
self.todays_performance.period_close = self.market_close
# If the next trading day is irrelevant, then return the daily packet
if (next_trading_day is None) or (next_trading_day >= self.last_close):
return daily_update
# Check for any dividends and auto-closes, then return the daily perf
# packet
self.check_upcoming_dividends(next_trading_day=next_trading_day)
return daily_update
def handle_simulation_end(self):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log_msg = "Simulated {n} trading days out of {m}."
log.info(log_msg.format(n=int(self.day_count), m=self.total_days))
log.info("first open: {d}".format(
d=self.sim_params.first_open))
log.info("last close: {d}".format(
d=self.sim_params.last_close))
bms = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.benchmark_returns_cont)
ars = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.algorithm_returns_cont)
acl = self.cumulative_risk_metrics.algorithm_cumulative_leverages
self.risk_report = risk.RiskReport(
ars,
self.sim_params,
benchmark_returns=bms,
algorithm_leverages=acl)
risk_dict = self.risk_report.to_dict()
return risk_dict
def __getstate__(self):
state_dict = \
{k: v for k, v in iteritems(self.__dict__)
if not k.startswith('_')}
state_dict['dividend_frame'] = pickle.dumps(self.dividend_frame)
state_dict['_dividend_count'] = self._dividend_count
# we already store perf periods as attributes
del state_dict['perf_periods']
STATE_VERSION = 3
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 3
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("PerformanceTracker saved state is too old.")
self.__dict__.update(state)
# Handle the dividend frame specially
self.dividend_frame = pickle.loads(state['dividend_frame'])
# properly setup the perf periods
self.perf_periods = []
p_types = ['cumulative', 'todays', 'minute']
for p_type in p_types:
name = p_type + '_performance'
period = getattr(self, name, None)
if period is None:
continue
period._position_tracker = self.position_tracker
self.perf_periods.append(period)
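# The __getstate__/__setstate__ pair above follows a versioned-pickle
# pattern: drop private attributes, stamp a version label, and refuse to
# restore state older than the oldest supported version. Below is a minimal,
# self-contained sketch of the same idea; the class and label names are
# illustrative only and are not part of zipline.
class _VersionedStateSketch(object):
    STATE_VERSION = 3
    OLDEST_SUPPORTED_STATE = 3

    def __getstate__(self):
        # keep only public attributes and stamp the version
        state = {k: v for k, v in self.__dict__.items()
                 if not k.startswith('_')}
        state['version_label'] = self.STATE_VERSION
        return state

    def __setstate__(self, state):
        # refuse to restore state that is too old to interpret safely
        if state.pop('version_label') < self.OLDEST_SUPPORTED_STATE:
            raise ValueError("saved state is too old")
        self.__dict__.update(state)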
|
apache-2.0
|
sdh11/gnuradio
|
gr-digital/examples/example_fll.py
|
7
|
5704
|
#!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_arg import eng_float, intx
from argparse import ArgumentParser
import sys
import numpy
try:
from matplotlib import pyplot
except ImportError:
print("Error: could not from matplotlib import pyplot (http://matplotlib.sourceforge.net/)")
sys.exit(1)
class example_fll(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*numpy.random.randint(0, 2, N) - 1.0
data = numpy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.fll = digital.fll_band_edge_cc(sps, rolloff, ntaps, bw)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_fll = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.vsnk_phs = blocks.vector_sink_f()
self.vsnk_err = blocks.vector_sink_f()
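        # fll_band_edge_cc exposes four outputs: 0 = frequency-corrected
        # samples, 1 = frequency estimate, 2 = phase estimate, 3 = loop
        # error; each one is captured in its own vector sink below.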
self.connect(self.src, self.rrc, self.chn, self.fll, self.vsnk_fll)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.fll,1), self.vsnk_frq)
self.connect((self.fll,2), self.vsnk_phs)
self.connect((self.fll,3), self.vsnk_err)
def main():
parser = ArgumentParser(conflict_handler="resolve")
parser.add_argument("-N", "--nsamples", type=int, default=2000,
help="Set the number of samples to process [default=%(default)r]")
parser.add_argument("-S", "--sps", type=int, default=4,
help="Set the samples per symbol [default=%(default)r]")
parser.add_argument("-r", "--rolloff", type=eng_float, default=0.35,
help="Set the rolloff factor [default=%(default)r]")
parser.add_argument("-W", "--bandwidth", type=eng_float, default=2*numpy.pi/100.0,
help="Set the loop bandwidth [default=%(default)r]")
parser.add_argument("-n", "--ntaps", type=int, default=45,
help="Set the number of taps in the filters [default=%(default)r]")
parser.add_argument("--noise", type=eng_float, default=0.0,
help="Set the simulation noise voltage [default=%(default)r]")
parser.add_argument("-f", "--foffset", type=eng_float, default=0.2,
help="Set the simulation's normalized frequency offset (in Hz) [default=%(default)r]")
parser.add_argument("-t", "--toffset", type=eng_float, default=1.0,
help="Set the simulation's timing offset [default=%(default)r]")
parser.add_argument("-p", "--poffset", type=eng_float, default=0.0,
help="Set the simulation's phase offset [default=%(default)r]")
args = parser.parse_args()
# Adjust N for the interpolation by sps
args.nsamples = args.nsamples // args.sps
# Set up the program-under-test
put = example_fll(args.nsamples, args.sps, args.rolloff,
args.ntaps, args.bandwidth, args.noise,
args.foffset, args.toffset, args.poffset)
put.run()
data_src = numpy.array(put.vsnk_src.data())
data_err = numpy.array(put.vsnk_err.data())
# Convert the FLL's LO frequency from rads/sec to Hz
data_frq = numpy.array(put.vsnk_frq.data()) / (2.0*numpy.pi)
    # Adjust this to align with the data. There are 2 filters, each ntaps
    # long, and the channel introduces another 4-sample delay.
data_fll = numpy.array(put.vsnk_fll.data()[2*args.ntaps-4:])
# Plot the FLL's LO frequency
f1 = pyplot.figure(1, figsize=(12,10))
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("FLL LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the FLL's error
s2 = f1.add_subplot(2,2,2)
s2.plot(data_err)
s2.set_title("FLL Error")
s2.set_xlabel("Samples")
s2.set_ylabel("FLL Loop error")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,3)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_fll.real, data_fll.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
# Plot the symbols in time
s4 = f1.add_subplot(2,2,4)
s4.plot(data_src.real, "o-")
s4.plot(data_fll.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pyplot.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
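# Example invocations (illustrative; the options correspond to the
# ArgumentParser flags defined in main() above):
#   python example_fll.py
#   python example_fll.py -N 4000 --noise 0.2 -f 0.1
#   python example_fll.py -S 8 -r 0.5 -n 65 -W 0.03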
|
gpl-3.0
|
JPFrancoia/scikit-learn
|
sklearn/linear_model/bayes.py
|
50
|
16145
|
"""
Various Bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
# Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
# Update alpha and lambda
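            # gamma_ below is the effective number of well-determined
            # parameters; lambda_ and alpha_ are then re-estimated with the
            # evidence-maximization updates, regularized by their Gamma
            # hyper-priors:
            #   lambda_ <- (gamma_ + 2*lambda_1) / (sum(coef_**2) + 2*lambda_2)
            #   alpha_  <- (n_samples - gamma_ + 2*alpha_1) / (rmse_ + 2*alpha_2)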
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_) /
(lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1) /
(np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
(rmse_ + 2 * alpha_2))
# Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_) +
n_samples * log(alpha_) -
alpha_ * rmse_ -
(lambda_ * np.sum(coef_ ** 2)) -
logdet_sigma_ -
n_samples * log(2 * np.pi))
self.scores_.append(s)
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_offset, y_offset, X_scale)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
# Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
# Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
((coef_[keep_lambda]) ** 2 +
2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
(rmse_ + 2. * alpha_2))
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset, y_offset, X_scale)
return self
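# A usage sketch (illustrative only, not one of scikit-learn's own examples):
# both estimators are fit through the same estimator API, and ARDRegression
# tends to drive the coefficients of irrelevant features toward zero.
#
#   import numpy as np
#   from sklearn.linear_model import BayesianRidge, ARDRegression
#
#   rng = np.random.RandomState(0)
#   X = rng.randn(100, 5)
#   w_true = np.array([1.0, 0.0, -2.0, 0.0, 0.5])   # sparse true weights
#   y = X.dot(w_true) + 0.1 * rng.randn(100)
#
#   for Estimator in (BayesianRidge, ARDRegression):
#       model = Estimator(compute_score=True).fit(X, y)
#       print(Estimator.__name__, np.round(model.coef_, 2))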
|
bsd-3-clause
|
wangkua1/sportvu
|
sportvu/vis/Game.py
|
1
|
1380
|
import pandas as pd
from Event import Event
from Team import Team
from Constant import Constant
import cPickle as pickle
class Game:
"""A class for keeping info about the games"""
def __init__(self, path_to_pickle, event_index):
# self.events = None
self.home_team = None
self.guest_team = None
self.event = None
self.path_to_pickle = path_to_pickle
self.event_index = event_index
def read_json(self):
# data_frame = pd.read_json(self.path_to_pickle)
with open(self.path_to_pickle, 'rb') as handle:
data_frame = pickle.load(handle)
data_frame
last_default_index = len(data_frame['events']) - 1
print ('...')
print (last_default_index)
self.event_index = min(self.event_index, last_default_index)
index = self.event_index
print(Constant.MESSAGE + str(last_default_index))
event = data_frame['events'][index]
self.event = Event(event)
self.home_team = Team(event['home']['teamid'])
self.guest_team = Team(event['visitor']['teamid'])
def start(self):
self.event.show()
def find_sequence(self, start_game_clock, end_game_clock):
"""
find [start_game_clock, end_game_clock] sequence in Game
        return an Event with moments truncated as specified
"""
|
mit
|
askielboe/JAVELIN
|
javelin/lcmodel.py
|
1
|
100414
|
#Last-modified: 08 Dec 2013 01:13:36
# generic packages
import numpy as np
#np.seterr(all='raise')
from numpy.random import normal, multivariate_normal
from scipy.optimize import fmin
import matplotlib.pyplot as plt
# internal packages
from cholesky_utils import cholesky, trisolve, chosolve, chodet, chosolve_from_tri, chodet_from_tri
from zylc import LightCurve, get_data
from cov import get_covfunc_dict
from spear import spear, spear_threading
from predict import PredictSignal, PredictRmap, PredictPmap, PredictSPmap
from gp import FullRankCovariance, NearlyFullRankCovariance
from err import *
from emcee import EnsembleSampler
from graphic import figure_handler
my_neg_inf = float(-1.0e+300)
my_pos_inf = float( 1.0e+300)
tau_floor = 1.e-6
tau_ceiling = 1.e+5
sigma_floor = 1.e-6
sigma_ceiling = 1.e+2
logtau_floor = np.log(tau_floor)
logtau_ceiling = np.log(tau_ceiling)
logsigma_floor = np.log(sigma_floor)
logsigma_ceiling = np.log(sigma_ceiling)
nu_floor = 1.e-6
lognu_floor = np.log(nu_floor)
nu_ceiling = 1.e+3
lognu_ceiling = np.log(nu_ceiling)
__all__ = ['Cont_Model', 'Rmap_Model', 'Pmap_Model', 'SPmap_Model']
""" Generic functions """
def _lnlike_from_U(U, zydata, set_retq=False, set_verbose=False):
""" Calculate the log-likelihoods from the upper triangle of cholesky decomposition.
"""
# log determinant of C^-1
detC_log = chodet_from_tri(U, retlog=True)
# solve for C a = y so that a = C^-1 y
a = chosolve_from_tri(U, zydata.marr)
# solve for C b = L so that b = C^-1 L
b = chosolve_from_tri(U, zydata.larr)
# multiply L^T and b so that C_p = L^T C^-1 L = C_q^-1
C_p = np.dot(zydata.larrTr, b)
# for 'issingle is True' case, C_p is a scalar.
if np.isscalar(C_p):
        # for single-mode, cholesky of C_p is simply the square-root of C_p
W = np.sqrt(C_p)
detCp_log = np.log(C_p.squeeze())
        # for single-mode, simply divide L^T by C_p
d = zydata.larrTr/C_p
else:
# cholesky decompose C_p so that W^T W = C_p
W, info = cholesky(C_p, raiseinfo=False)
if info > 0 :
return(_exit_with_retval(zydata.nlc, set_retq,
errmsg="Warning: non positive-definite covariance W",
set_verbose=set_verbose))
detCp_log = chodet_from_tri(W, retlog=True)
# solve for C_p d = L^T so that d = C_p^-1 L^T = C_q L^T
d = chosolve_from_tri(W, zydata.larrTr)
# multiply b d and a so that e = C^-1 L C_p^-1 L^T C^-1 y
e = np.dot(b, np.dot(d, a))
# a minus e so that f = a - e = C^-1 y - C^-1 L C_p^-1 L^T C^-1 y
# thus f = C_v^-1 y
f = a - e
# multiply y^T and f so that h = y^T C_v^-1 y
h = np.dot(zydata.marr, f)
# chi2_PRH = -0.5*h
_chi2 = -0.5*h
# following Carl Rasmussen's term, a penalty on the complexity of
# the model
_compl_pen = -0.5*detC_log
# penalty on blatant linear drift
_wmean_pen = -0.5*detCp_log
# final log_likelhood
_log_like = _chi2 + _compl_pen + _wmean_pen
if set_retq:
q = np.dot(d, a)
return(_log_like, _chi2, _compl_pen, _wmean_pen, q)
else:
return(_log_like)
def _exit_with_retval(nlc, set_retq, errmsg=None, set_verbose=False):
""" Return failure elegantly.
When you are desperate and just want to leave the calculation with
appropriate return values that quietly speak out your angst.
"""
if errmsg is not None:
if set_verbose:
print("Exit: %s"%errmsg)
if set_retq :
return(my_neg_inf, my_neg_inf, my_neg_inf, my_neg_inf,
[my_neg_inf]*nlc)
else:
return(my_neg_inf)
###########################################################
""" Cont_Model: Continuum Variability """
def unpacksinglepar(p, covfunc="drw", uselognu=False) :
""" Internal Function: Unpack the physical parameters from input 1-d array for single mode.
"""
if p[0] > logsigma_ceiling :
sigma = sigma_ceiling
elif p[0] < logsigma_floor :
sigma = sigma_floor
else :
sigma = np.exp(p[0])
if p[1] > logtau_ceiling :
tau = tau_ceiling
elif p[1] < logtau_floor :
tau = tau_floor
else :
tau = np.exp(p[1])
if covfunc == "drw" :
nu = None
elif uselognu :
if p[2] < lognu_floor :
nu = nu_floor
elif p[2] > lognu_ceiling :
nu = nu_ceiling
else :
nu = np.exp(p[2])
else :
nu = p[2]
return(sigma, tau, nu)
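# Example (illustrative): for the default "drw" covariance the parameter
# vector holds the natural logs of the physical parameters, so
#   unpacksinglepar([np.log(0.5), np.log(100.0)], covfunc="drw")
# returns (0.5, 100.0, None), with values clipped to the floors/ceilings
# defined above.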
# Keep the order of arguments fixed, as they will be passed by position rather than by keyword to methods under Cont_Model.
def lnpostfn_single_p(p, zydata, covfunc, set_prior=True, conthpd=None, uselognu=False, rank="Full", set_retq=False, set_verbose=False) :
""" Calculate the log posterior for parameter set `p`.
Parameters
----------
p: list
Parameter list.
zydata: LightCurve
Input LightCurve data.
covfunc: str
name of the covariance function.
set_prior: bool, optional
Turn on/off priors that are predefined in `lnpostfn_single_p` (default: True).
conthpd: ndarray, optional
Priors on sigma and tau as an ndarray with shape (3, 2),
np.array([[log(sigma_low), log(tau_low)],
[log(sigma_med), log(tau_med)],
[log(sigma_hig), log(tau_hig)]])
where 'low', 'med', and 'hig' are defined as the 68% confidence
limits around the median. Here it is only used if the `covfunc` is 'kepler2_exp'.
uselognu: bool, optional
Whether to use lognu instead of nu (default: False).
rank: str, optional
Type of covariance matrix rank, "Full" or "NearlyFull" (default: "Full").
set_retq: bool, optional
Whether to return all the components of the posterior (default: False).
set_verbose: bool, optional
        Turn on/off verbose mode (default: False).
Returns
-------
retval: float (set_retq is False) or list (set_retq is True)
if `retval` returns a list, then it contains the full posterior info
as a list of [log_posterior, chi2_component, det_component, DC_penalty, correction_to_the_mean].
"""
sigma, tau, nu = unpacksinglepar(p, covfunc, uselognu=uselognu)
# log-likelihood
if set_retq :
vals = list(lnlikefn_single(zydata, covfunc=covfunc, rank=rank, sigma=sigma, tau=tau, nu=nu, set_retq=True, set_verbose=set_verbose))
else :
logl = lnlikefn_single(zydata, covfunc=covfunc, rank=rank, sigma=sigma, tau=tau, nu=nu, set_retq=False, set_verbose=set_verbose)
# prior
prior = 0.0
if set_prior :
if covfunc == "kepler2_exp" :
if conthpd is None :
raise RuntimeError("kepler2_exp prior requires conthpd")
# for sigma
if p[0] < conthpd[1,0] :
prior0 = (p[0] - conthpd[1,0])/(conthpd[1,0]-conthpd[0,0])
else :
prior0 = (p[0] - conthpd[1,0])/(conthpd[2,0]-conthpd[1,0])
# for tau
if p[1] < conthpd[1,1] :
prior1 = (p[1] - conthpd[1,1])/(conthpd[1,1]-conthpd[0,1])
else :
prior1 = (p[1] - conthpd[1,1])/(conthpd[2,1]-conthpd[1,1])
# final
prior += -0.5*(prior0*prior0+prior1*prior1)
else :
prior += - np.log(sigma)
if tau > zydata.cont_cad :
prior += - np.log(tau/zydata.cont_cad)
elif tau < 0.001 :
# 86.4 seconds if input is in days
prior += my_neg_inf
else :
prior += - np.log(zydata.cont_cad/tau)
# combine prior and log-likelihood
if set_retq :
vals[0] = vals[0] + prior
vals.append(prior)
return(vals)
else :
logp = logl + prior
return(logp)
def lnlikefn_single(zydata, covfunc="drw", rank="Full", set_retq=False, set_verbose=False, **covparams) :
""" internal function to calculate the log likelihood, see `lnpostfn_single_p` for doc.
"""
covfunc_dict = get_covfunc_dict(covfunc, **covparams)
sigma = covparams.pop("sigma")
tau = covparams.pop("tau")
nu = covparams.pop("nu", None)
# set up covariance function
if (sigma<=0.0 or tau<=0.0) :
return(_exit_with_retval(zydata.nlc, set_retq,
errmsg="Warning: illegal input of parameters",
set_verbose=set_verbose))
if covfunc == "pow_exp" :
if nu <= 0.0 or nu >= 2.0 :
return(_exit_with_retval(zydata.nlc, set_retq,
errmsg="Warning: illegal input of parameters in nu",
set_verbose=set_verbose))
elif covfunc == "matern" :
if nu <= 0.0 :
return(_exit_with_retval(zydata.nlc, set_retq,
errmsg="Warning: illegal input of parameters in nu",
set_verbose=set_verbose))
elif covfunc == "kepler_exp" :
# here nu is the ratio
if nu < 0.0 or nu >= 1.0 :
return(_exit_with_retval(zydata.nlc, set_retq,
errmsg="Warning: illegal input of parameters in nu",
set_verbose=set_verbose))
elif covfunc == "kepler2_exp" :
# here nu is the cutoff time scale
if nu < 0.0 or nu >= tau :
return(_exit_with_retval(zydata.nlc, set_retq,
errmsg="Warning: illegal input of parameters in nu",
set_verbose=set_verbose))
# choice of ranks
if rank == "Full" :
# using full-rank
C = FullRankCovariance(**covfunc_dict)
elif rank == "NearlyFull" :
# using nearly full-rank
C = NearlyFullRankCovariance(**covfunc_dict)
else :
raise InputError("No such option for rank "+rank)
# cholesky decompose S+N so that U^T U = S+N = C
# using intrinsic method of C without explicitly writing out cmatrix
try :
U = C.cholesky(zydata.jarr, observed=False, nugget=zydata.varr)
except :
return(_exit_with_retval(zydata.nlc, set_retq,
errmsg="Warning: non positive-definite covariance C",
set_verbose=set_verbose))
    # calculate the PRH likelihood
retval = _lnlike_from_U(U, zydata, set_retq=set_retq, set_verbose=set_verbose)
return(retval)
class Cont_Model(object) :
def __init__(self, zydata=None, covfunc="drw") :
""" Cont Model object.
Parameters
----------
zydata: LightCurve object, optional
Input LightCurve data, a null input means that `Cont_Model` will be loading existing chains (default: None).
covfunc: str, optional
Name of the covariance function for the continuum (default: drw)
"""
self.zydata = zydata
self.covfunc = covfunc
if zydata is None :
pass
else :
self.nlc = zydata.nlc
self.npt = zydata.npt
self.cont_npt = zydata.nptlist[0]
self.cont_cad = zydata.cont_cad
self.cont_cad_min = zydata.cont_cad_min
self.cont_cad_max = zydata.cont_cad_max
self.cont_std = zydata.cont_std
self.rj = zydata.rj
self.jstart = zydata.jstart
self.jend = zydata.jend
self.names = zydata.names
self.vars = ["sigma", "tau"]
self.texs = [r"$\log\,\sigma$", r"$\log\,\tau$"]
if covfunc == "drw" :
self.uselognu = False
self.ndim = 2
elif covfunc == "matern" or covfunc == "kepler2_exp" :
self.uselognu = True
self.ndim = 3
self.vars.append("nu")
self.texs.append(r"$\log\,\nu$")
else :
self.uselognu = False
self.ndim = 3
self.vars.append("nu")
self.texs.append(r"$\nu$")
def __call__(self, p, **lnpostparams):
""" Calculate the posterior value given one parameter set `p`. See `lnpostfn_single_p` for doc.
"""
return(lnpostfn_single_p(p, self.zydata, covfunc=self.covfunc, uselognu=self.uselognu, **lnpostparams))
def do_map(self, p_ini, fixed=None, **lnpostparams) :
"""
Maximum A Posterior minimization. See `lnpostfn_single_p` for doc.
Parameters
----------
p_ini: list
Initial guess for the parameters.
fixed: list
Bit list indicating which parameters are to be fixed during minimization, `1` means varying, while `0` means fixed, so [1, 1, 0] means fixing only the third parameter, and `len(fixed)` equals the number of parameters (default: None, i.e., varying all the parameters simultaneously).
lnpostparams: kwargs
kwargs for `lnpostfn_single_p`.
"""
set_verbose = lnpostparams.pop("set_verbose", True)
set_retq = lnpostparams.pop("set_retq", False)
set_prior = lnpostparams.pop("set_prior", True)
rank = lnpostparams.pop("rank", "Full")
conthpd = lnpostparams.pop("conthpd", None)
if set_retq is True :
raise InputError("set_retq has to be False")
p_ini = np.asarray(p_ini)
if fixed is not None :
fixed = np.asarray(fixed)
func = lambda _p : -lnpostfn_single_p(
_p*fixed+p_ini*(1.-fixed), self.zydata, self.covfunc,
set_prior=set_prior,
conthpd=conthpd,
uselognu=self.uselognu,
rank=rank,
set_retq=False,
set_verbose=set_verbose
)
else :
func = lambda _p : -lnpostfn_single_p(
_p, self.zydata, self.covfunc,
set_prior=set_prior,
conthpd=conthpd,
uselognu=self.uselognu,
rank=rank,
set_retq=False,
set_verbose=set_verbose
)
p_bst, v_bst = fmin(func, p_ini, full_output=True)[:2]
sigma, tau, nu = unpacksinglepar(p_bst, covfunc=self.covfunc, uselognu=self.uselognu)
if fixed is not None :
p_bst = p_bst*fixed+p_ini*(1.-fixed)
if set_verbose :
print("Best-fit parameters are:")
print("sigma %8.3f tau %8.3f"%(sigma, tau))
if nu is not None :
print("nu %8.3f"%nu)
print("with logp %10.5g "%-v_bst)
return(p_bst, -v_bst)
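    # Example (illustrative, where `model` is a Cont_Model instance with a
    # 3-parameter covariance): fix nu while optimizing sigma and tau,
    #   p_bst, logp = model.do_map([np.log(0.5), np.log(100.0), 1.0],
    #                              fixed=[1, 1, 0])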
def do_grid1d(self, p_ini, fixed, rangex, dx, fgrid1d, **lnpostparams) :
""" Minimization over a 1D grid. See `lnpostfn_single_p` for doc.
Parameters
----------
p_ini : list
Initial guess for the parameters.
fixed : list
Bit list indicating which parameters are to be fixed during minimization, `1` means varying, while `0` means fixed, so [1, 1, 0] means fixing only the third parameter, and `len(fixed)` equals the number of parameters (default: None, i.e., varying all the parameters simultaneously).
rangex : tuple
range of `x`, i.e., (xmin, xmax)
dx : float
bin size in `x`.
fgrid1d: str
filename for the output.
lnpostparams: kwargs
kwargs for `lnpostfn_single_p`.
"""
set_verbose = lnpostparams.pop("set_verbose", True)
xs = np.arange(rangex[0], rangex[-1]+dx, dx)
fixed = np.asarray(fixed)
nfixed = np.sum(fixed == 0)
if nfixed != 1 :
raise InputError("wrong number of fixed pars ")
f = open(fgrid1d, "w")
for x in xs :
_p_ini = p_ini*fixed + x*(1.-fixed)
_p, _l = self.do_map(_p_ini, fixed=fixed, **lnpostparams)
_line = "".join([format(_l, "20.10g"), " ".join([format(r, "10.5f") for r in _p]), "\n"])
f.write(_line)
f.flush()
f.close()
if set_verbose :
print("saved grid1d result to %s"%fgrid1d)
def do_grid2d(self, p_ini, fixed, rangex, dx, rangey, dy, fgrid2d, **lnpostparams) :
""" Minimization over a 2D grid. See `lnpostfn_single_p` for doc.
Parameters
----------
p_ini : list
Initial guess for the parameters.
fixed : list
Bit list indicating which parameters are to be fixed during minimization, `1` means varying, while `0` means fixed, so [1, 1, 0] means fixing only the third parameter, and `len(fixed)` equals the number of parameters (default: None, i.e., varying all the parameters simultaneously).
rangex : tuple
range of `x`, i.e., (xmin, xmax)
dx : float
bin size in `x`.
rangey : tuple
range of `y`, i.e., (ymin, ymax)
dy : float
bin size in `y`.
fgrid2d: str
filename for the output.
lnpostparams: kwargs
kwargs for `lnpostfn_single_p`.
"""
fixed = np.asarray(fixed)
set_verbose = lnpostparams.pop("set_verbose", True)
xs = np.arange(rangex[0], rangex[-1]+dx, dx)
ys = np.arange(rangey[0], rangey[-1]+dy, dy)
nfixed = np.sum(fixed == 0)
if nfixed != 2 :
raise InputError("wrong number of fixed pars ")
posx, posy = np.nonzero(1-fixed)[0]
dimx, dimy = len(xs),len(ys)
header = " ".join(["#", str(posx), str(posy), str(dimx), str(dimy), "\n"])
print(header)
f = open(fgrid2d, "w")
f.write(header)
for x in xs :
for y in ys :
_p_ini = p_ini*fixed
_p_ini[posx] = x
_p_ini[posy] = y
_p, _l = self.do_map(_p_ini, fixed=fixed, **lnpostparams)
_line = "".join([format(_l, "20.10g"), " ".join([format(r, "10.5f") for r in _p]), "\n"])
f.write(_line)
f.flush()
f.close()
if set_verbose :
print("saved grid2d result to %s"%fgrid2d)
def read_logp_map(self, fgrid2d, set_verbose=True) :
""" Read the output from `do_grid2d`.
Parameters
----------
fgrid2d: str
filename.
set_verbose: bool, optional
Turn on/off verbose mode (default: True).
Returns
-------
retdict: dict
Grid returned as a dict.
"""
f = open(fgrid2d, "r")
posx, posy, dimx, dimy = [int(r) for r in f.readline().lstrip("#").split()]
if set_verbose :
print("grid file %s is registered for"%fgrid2d)
print("var_x = %10s var_y = %10s"%(self.vars[posx], self.vars[posy]))
print("dim_x = %10d dim_y = %10d"%(dimx, dimy))
if self.covfunc != "drw" :
logp, sigma, tau, nu = np.genfromtxt(f,unpack=True,usecols=(0,1,2,3))
else :
logp, sigma, tau = np.genfromtxt(f,unpack=True,usecols=(0,1,2))
f.close()
retdict = {
'logp' : logp.reshape(dimx, dimy).T,
'sigma' : sigma.reshape(dimx, dimy).T,
'tau' : tau.reshape(dimx, dimy).T,
'nu' : None,
'posx' : posx,
'posy' : posy,
'dimx' : dimx,
'dimy' : dimy,
}
if self.covfunc != "drw" :
retdict['nu'] = nu.reshape(dimx, dimy).T
return(retdict)
def show_logp_map(self, fgrid2d, set_normalize=True, vmin=None, vmax=None, set_contour=True, clevels=None, set_verbose=True, figout=None, figext=None) :
""" Display the grid output from `do_grid2d`.
Parameters
----------
fgrid2d: str
filename.
        set_normalize: bool, optional
            Whether to shift the map so that its maximum is zero (default: True).
        vmin: float, optional
            Lower limit of the color scale (default: None, i.e., use the map minimum).
        vmax: float, optional
            Upper limit of the color scale (default: None, i.e., use the map maximum).
set_contour: bool, optional
Whether to overplot contours (default: True).
clevels: list, optional
Contour levels. `clevels` = None will set the levels as if the likelihood is for a Gaussian model with two parameters.
set_verbose: bool, optional
Turn on/off verbose mode (default: True).
figout: str, optional
            Output figure name (default: None, i.e., using sequential integers).
figext: str, optional
Output figure extension (default: None, i.e., using `show`).
"""
ln10 = np.log(10.0)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
retdict = self.read_logp_map(fgrid2d, set_verbose=set_verbose)
x = retdict[self.vars[retdict['posx']]]/ln10
y = retdict[self.vars[retdict['posy']]]/ln10
z = retdict['logp']
if x is None or y is None :
raise InputError("incompatible fgrid2d file"+fgrid2d)
xmin,xmax,ymin,ymax = np.min(x),np.max(x),np.min(y),np.max(y)
extent = (xmin,xmax,ymin,ymax)
if set_normalize :
zmax = np.max(z)
z = z - zmax
if vmin is None:
vmin = z.min()
if vmax is None:
vmax = z.max()
im = ax.imshow(z, origin='lower', vmin=vmin, vmax=vmax,
cmap='jet', interpolation="nearest", aspect="auto",
extent=extent)
if set_contour:
if clevels is None:
sigma3,sigma2,sigma1 = 11.8/2.0,6.17/2.0,2.30/2.0
levels = (vmax-sigma1, vmax-sigma2, vmax-sigma3)
else:
levels = clevels
ax.set_autoscale_on(False)
cs = ax.contour(z,levels, hold='on',colors='k',
origin='lower',extent=extent)
ax.set_xlabel(self.texs[retdict['posx']])
ax.set_ylabel(self.texs[retdict['posy']])
return(figure_handler(fig=fig, figout=figout, figext=figext))
def do_mcmc(self, conthpd=None, set_prior=True, rank="Full", nwalkers=100, nburn=50, nchain=50, fburn=None, fchain=None, flogp=None, threads=1, set_verbose=True):
""" Run MCMC sampling over the parameter space.
Parameters
----------
conthpd : ndarray, optional
Usually the `hpd` array generated from the MCMC chain using `Cont_Model` (default: None).
set_prior: bool, optional
Turn on/off priors that are predefined in `lnpostfn_single_p` (default: True).
rank: str, optional
Type of covariance matrix rank, "Full" or "NearlyFull" (default: "Full").
        nwalkers : integer, optional
Number of walkers for `emcee` (default: 100).
nburn : integer, optional
Number of burn-in steps for `emcee` (default: 50).
nchain : integer, optional
Number of chains for `emcee` (default: 50).
fburn : str, optional
filename for burn-in output (default: None).
fchain : str, optional
filename for MCMC chain output (default: None).
flogp : str, optional
filename for logp output (default: None).
        threads : integer, optional
Number of threads (default: 1).
set_verbose: bool, optional
Turn on/off verbose mode (default: True).
"""
# initialize a multi-dim random number array
p0 = np.random.rand(nwalkers*self.ndim).reshape(nwalkers, self.ndim)
# initial values of sigma to be scattering around cont_std
p0[:,0] = p0[:,0] - 0.5 + np.log(self.cont_std)
# initial values of tau filling 0 - 0.5rj
p0[:,1] = np.log(self.rj*0.5*p0[:,1])
if self.covfunc == "pow_exp" :
p0[:,2] = p0[:,2] * 1.99
elif self.covfunc == "matern" :
p0[:,2] = np.log(p0[:,2] * 5)
elif self.covfunc == "kepler2_exp" :
p0[:,2] = np.log(self.rj*0.1*p0[:,2])
if set_verbose :
print("start burn-in")
print("nburn: %d nwalkers: %d --> number of burn-in iterations: %d"%
(nburn, nwalkers, nburn*nwalkers))
sampler = EnsembleSampler(nwalkers, self.ndim, lnpostfn_single_p,
args=(self.zydata, self.covfunc, set_prior,
conthpd,
self.uselognu,
rank,
False,
False),
threads=threads)
pos, prob, state = sampler.run_mcmc(p0, nburn)
if set_verbose :
print("burn-in finished")
if fburn is not None :
if set_verbose :
print("save burn-in chains to %s"%fburn)
np.savetxt(fburn, sampler.flatchain)
# reset sampler
sampler.reset()
if set_verbose :
print("start sampling")
sampler.run_mcmc(pos, nchain, rstate0=state)
if set_verbose :
print("sampling finished")
af = sampler.acceptance_fraction
if set_verbose :
print("acceptance fractions for all walkers are")
print(" ".join([format(r, "3.2f") for r in af]))
if fchain is not None :
if set_verbose :
print("save MCMC chains to %s"%fchain)
np.savetxt(fchain, sampler.flatchain)
if flogp is not None :
if set_verbose :
print("save logp of MCMC chains to %s"%flogp)
np.savetxt(flogp, np.ravel(sampler.lnprobability), fmt='%16.8f')
        # make the chain an attribute
self.flatchain = sampler.flatchain
self.flatchain_whole = np.copy(self.flatchain)
# get HPD
self.get_hpd(set_verbose=set_verbose)
def get_hpd(self, set_verbose=True):
""" Get the 68% percentile range of each parameter to self.hpd.
Parameters
----------
set_verbose: bool, optional
Turn on/off verbose mode (default: True).
"""
hpd = np.zeros((3, self.ndim))
chain_len = self.flatchain.shape[0]
pct1sig = chain_len*np.array([0.16, 0.50, 0.84])
medlowhig = pct1sig.astype(np.int32)
for i in xrange(self.ndim):
vsort = np.sort(self.flatchain[:,i])
hpd[:,i] = vsort[medlowhig]
if set_verbose :
print("HPD of %s"%self.vars[i])
if (self.vars[i] == "nu" and (not self.uselognu)) :
print("low: %8.3f med %8.3f hig %8.3f"%tuple(hpd[:,i]))
else :
print("low: %8.3f med %8.3f hig %8.3f"%tuple(np.exp(hpd[:,i])))
# register hpd to attr
self.hpd = hpd
def show_hist(self, bins=100, figout=None, figext=None):
""" Display histograms of the posterior distributions.
Parameters
----------
bins: integer, optional
Number of bins (default:100).
figout: str, optional
            Output figure name (default: None, i.e., using sequential integers).
figext: str, optional
Output figure extension (default: None, i.e., using `show`).
"""
if not hasattr(self, "flatchain"):
print("Warning: need to run do_mcmc or load_chain first")
return(1)
ln10 = np.log(10.0)
fig = plt.figure(figsize=(8, 5))
for i in xrange(self.ndim) :
ax = fig.add_subplot(1,self.ndim,i+1)
if (self.vars[i] == "nu" and (not self.uselognu)) :
ax.hist(self.flatchain[:,i], bins)
if self.covfunc == "kepler2_exp" :
ax.axvspan(self.cont_cad_min,
self.cont_cad, color="g", alpha=0.2)
else :
ax.hist(self.flatchain[:,i]/ln10, bins)
if self.vars[i] == "nu" and self.covfunc == "kepler2_exp" :
ax.axvspan(np.log10(self.cont_cad_min),
np.log10(self.cont_cad), color="g", alpha=0.2)
ax.set_xlabel(self.texs[i])
ax.set_ylabel("N")
#plt.get_current_fig_manager().toolbar.zoom()
return(figure_handler(fig=fig, figout=figout, figext=figext))
def load_chain(self, fchain, set_verbose=True):
""" Load an existing chain file.
Parameters
----------
fchain : str
filename for MCMC chain input.
set_verbose: bool, optional
Turn on/off verbose mode (default: True).
"""
if set_verbose :
print("load MCMC chain from %s"%fchain)
self.flatchain = np.genfromtxt(fchain)
self.flatchain_whole = np.copy(self.flatchain)
# get HPD
self.get_hpd(set_verbose=set_verbose)
def break_chain(self, covpar_segments):
""" Break the chain into different segments.
Parameters
----------
covpar_segments : list of lists.
list with length that equals the number of dimensions of the parameter space.
"""
if (len(covpar_segments) != self.ndim) :
print("Error: covpar_segments has to be a list of length %d"%(self.ndim))
return(1)
if not hasattr(self, "flatchain"):
print("Warning: need to run do_mcmc or load_chain first")
return(1)
for i, covpar_seq in enumerate(covpar_segments) :
if covpar_seq is None:
continue
indx = np.argsort(self.flatchain[:, i])
imin, imax = np.searchsorted(self.flatchain[indx, i], covpar_seq)
indx_cut = indx[imin : imax]
if len(indx_cut) < 10 :
print("Warning: cut too aggressive!")
return(1)
self.flatchain = self.flatchain[indx_cut, :]
def restore_chain(self) :
""" Restore chain after `break_chain`.
"""
self.flatchain = np.copy(self.flatchain_whole)
def get_qlist(self, p_bst) :
""" get the best-fit linear responses.
Parameters
----------
p_bst: list
best-fit parameters.
"""
self.qlist = lnpostfn_single_p(p_bst, self.zydata, self.covfunc,
uselognu=self.uselognu, rank="Full", set_retq=True)[4]
def do_pred(self, p_bst, fpred=None, dense=10, rank="Full", set_overwrite=True) :
""" Predict light curves using the best-fit parameters.
Parameters
----------
p_bst: list
best-fit parameters.
fpred: str, optional
filename for saving the predicted light curves.
dense: integer, optional
            factor by which the sampling of the predicted light curve is denser than that of the original data (default: 10).
rank: str, optional
Type of covariance matrix rank, "Full" or "NearlyFull" (default: "Full").
set_overwrite: bool, optional
Whether to overwrite, if a `fpred` file already exists.
Returns
-------
zypred : LightCurve data.
Predicted LightCurve.
"""
self.get_qlist(p_bst)
self.zydata.update_qlist(self.qlist)
sigma, tau, nu = unpacksinglepar(p_bst, self.covfunc, uselognu=self.uselognu)
lcmean=self.zydata.blist[0]
P = PredictSignal(zydata=self.zydata, lcmean=lcmean,
rank=rank, covfunc=self.covfunc,
sigma=sigma, tau=tau, nu=nu)
nwant = dense*self.cont_npt
jwant0 = self.jstart - 0.1*self.rj
jwant1 = self.jend + 0.1*self.rj
jwant = np.linspace(jwant0, jwant1, nwant)
mve, var = P.mve_var(jwant)
sig = np.sqrt(var)
zylclist_pred = [[jwant, mve, sig],]
zydata_pred = LightCurve(zylclist_pred)
if fpred is not None :
zydata_pred.save(fpred, set_overwrite=set_overwrite)
return(zydata_pred)
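# A typical continuum-fitting workflow with Cont_Model (a sketch; the file
# names are hypothetical):
#   from javelin.zylc import get_data
#   zydata = get_data(["continuum.dat"])
#   cont = Cont_Model(zydata, covfunc="drw")
#   cont.do_mcmc(nwalkers=100, nburn=50, nchain=50, fchain="chain_cont.dat")
#   p_bst, logp = cont.do_map(cont.hpd[1, :])   # start from the medians
#   zypred = cont.do_pred(p_bst, fpred="pred_cont.dat", dense=10)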
###########################################################
""" Rmap_Model: Spectroscopic RM """
def unpackspearpar(p, nlc=None, hascontlag=False) :
""" Internal Function: unpack the physical parameters from input 1-d array for spear mode.
"""
if nlc is None:
# possible to figure out nlc from the size of p
nlc = (len(p) - 2)//3 + 1
sigma = np.exp(p[0])
tau = np.exp(p[1])
if hascontlag :
lags = np.zeros(nlc)
wids = np.zeros(nlc)
scales = np.ones(nlc)
for i in xrange(1, nlc) :
lags[i] = p[2+(i-1)*3]
wids[i] = p[3+(i-1)*3]
scales[i] = p[4+(i-1)*3]
return(sigma, tau, lags, wids, scales)
else :
llags = np.zeros(nlc-1)
lwids = np.zeros(nlc-1)
lscales = np.ones(nlc-1)
for i in xrange(nlc-1) :
llags[i] = p[2+i*3]
lwids[i] = p[3+i*3]
lscales[i] = p[4+i*3]
return(sigma, tau, llags, lwids, lscales)
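# Example (illustrative): for a continuum plus two emission lines (nlc=3)
# the parameter vector is
#   p = [log(sigma), log(tau), lag1, wid1, scale1, lag2, wid2, scale2]
# and unpackspearpar(p, nlc=3) returns sigma, tau plus three arrays
# (llags, lwids, lscales), each of length nlc-1 = 2.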
def lnpostfn_spear_p(p, zydata, conthpd=None, lagtobaseline=0.3, laglimit=None, set_threading=False, blocksize=10000, set_retq=False, set_verbose=False):
""" log-posterior function of p.
Parameters
----------
p : array_like
Rmap_Model parameters, [log(sigma), log(tau), lag1, wid1, scale1,
...]
zydata: LightCurve object
Input LightCurve data.
conthpd: ndarray, optional
Priors on sigma and tau as an ndarray with shape (3, 2),
np.array([[log(sigma_low), log(tau_low)],
[log(sigma_med), log(tau_med)],
[log(sigma_hig), log(tau_hig)]])
where 'low', 'med', and 'hig' are defined as the 68% confidence
limits around the median. conthpd usually comes in as an attribute
of the `Cont_Model` object `hpd` (default: None).
lagtobaseline: float, optional
Prior on lags. When input lag exceeds lagtobaseline*baseline, a
logarithmic prior will be applied.
laglimit: str or list of tuples.
hard boundaries for the lag searching during MCMC sampling.
'baseline' means the boundaries are naturally determined by the
duration of the light curves, or you can set them as a list with
`nline` of tuples, with each tuple containing the (min, max) pair
for each single line.
set_threading: bool, optional
True if you want threading in filling matrix. It conflicts with the
'threads' option in Rmap_Model.run_mcmc (default: False).
blocksize: int, optional
Maximum matrix block size in threading (default: 10000).
set_retq: bool, optional
Return the value(s) of q along with each component of the
log-likelihood if True (default: False).
set_verbose: bool, optional
True if you want verbosity (default: False).
Returns
-------
retval: float (set_retq is False) or list (set_retq is True)
if `retval` returns a list, then it contains the full posterior info
as a list of [log_posterior, chi2_component, det_component, DC_penalty, correction_to_the_mean].
"""
# unpack the parameters from p
sigma, tau, llags, lwids, lscales = unpackspearpar(p, zydata.nlc,
hascontlag=False)
if set_retq :
vals = list(lnlikefn_spear(zydata, sigma, tau, llags, lwids, lscales,
set_retq=True, set_verbose=set_verbose,
set_threading=set_threading, blocksize=blocksize))
else :
logl = lnlikefn_spear(zydata, sigma, tau, llags, lwids, lscales,
set_retq=False, set_verbose=set_verbose,
set_threading=set_threading, blocksize=blocksize)
# conthpd is in natural log
if conthpd is not None :
# for sigma
if p[0] < conthpd[1,0] :
prior0 = (p[0] - conthpd[1,0])/(conthpd[1,0]-conthpd[0,0])
else :
prior0 = (p[0] - conthpd[1,0])/(conthpd[2,0]-conthpd[1,0])
# for tau
if p[1] < conthpd[1,1] :
prior1 = (p[1] - conthpd[1,1])/(conthpd[1,1]-conthpd[0,1])
else :
prior1 = (p[1] - conthpd[1,1])/(conthpd[2,1]-conthpd[1,1])
else :
prior0 = 0.0
prior1 = 0.0
# for each lag
prior2 = 0.0
for i in xrange(zydata.nlc-1) :
if lagtobaseline < 1.0 :
if np.abs(llags[i]) > lagtobaseline*zydata.rj :
# penalize long lags when they are larger than 0.3 times the baseline,
# as it is too easy to fit the model with non-overlapping
# signals in the light curves.
prior2 += np.log(np.abs(llags[i])/(lagtobaseline*zydata.rj))
# penalize long lags to be impossible
if laglimit is not None :
if llags[i] > laglimit[i][1] or llags[i] < laglimit[i][0] :
                # try not to stack priors
prior2 = my_pos_inf
# add logp of all the priors
prior = -0.5*(prior0*prior0+prior1*prior1) - prior2
if set_retq :
vals[0] = vals[0] + prior
vals.extend([prior0, prior1, prior2])
return(vals)
else :
logp = logl + prior
return(logp)
def lnlikefn_spear(zydata, sigma, tau, llags, lwids, lscales, set_retq=False, set_verbose=False, set_threading=False, blocksize=10000):
""" Internal function to calculate the log likelihood.
"""
if zydata.issingle:
raise UsageError("lnlikefn_spear does not work for single mode")
# impossible scenarios
if (sigma<=0.0 or tau<=0.0 or np.min(lwids)<0.0 or np.min(lscales)<=0.0
or np.max(np.abs(llags))>zydata.rj) :
return(_exit_with_retval(zydata.nlc, set_retq,
errmsg="Warning: illegal input of parameters",
set_verbose=set_verbose))
# fill in lags/wids/scales
lags = np.zeros(zydata.nlc)
wids = np.zeros(zydata.nlc)
scales = np.ones(zydata.nlc)
lags[1:] = llags
wids[1:] = lwids
scales[1:] = lscales
# calculate covariance matrix
if set_threading :
C = spear_threading(zydata.jarr,zydata.jarr,
zydata.iarr,zydata.iarr,sigma,tau,lags,wids,scales,
blocksize=blocksize)
else :
C = spear(zydata.jarr,zydata.jarr,
zydata.iarr,zydata.iarr,sigma,tau,lags,wids,scales)
# decompose C inplace
U, info = cholesky(C, nugget=zydata.varr, inplace=True, raiseinfo=False)
# handle exceptions here
if info > 0 :
return(_exit_with_retval(zydata.nlc, set_retq,
errmsg="Warning: non positive-definite covariance C",
set_verbose=set_verbose))
retval = _lnlike_from_U(U, zydata, set_retq=set_retq, set_verbose=set_verbose)
return(retval)
class Rmap_Model(object) :
def __init__(self, zydata=None) :
""" Rmap Model object.
Parameters
----------
zydata: LightCurve object, optional
Light curve data.
"""
self.zydata = zydata
if zydata is None :
pass
else :
self.nlc = zydata.nlc
self.npt = zydata.npt
self.cont_npt = zydata.nptlist[0]
self.cont_cad = zydata.cont_cad
self.cont_std = zydata.cont_std
self.rj = zydata.rj
self.jstart = zydata.jstart
self.jend = zydata.jend
self.names = zydata.names
# number of parameters
self.ndim = 2 + (self.nlc-1)*3
self.vars = [ "sigma", "tau" ]
self.texs = [r"$\log\,\sigma$", r"$\log\,\tau$"]
for i in xrange(1, self.nlc) :
self.vars.append("_".join(["lag", self.names[i]]))
self.vars.append("_".join(["wid", self.names[i]]))
self.vars.append("_".join(["scale", self.names[i]]))
self.texs.append( "".join([r"$t_{", self.names[i].lstrip(r"$").rstrip(r"$") ,r"}$"]))
self.texs.append( "".join([r"$w_{", self.names[i].lstrip(r"$").rstrip(r"$") ,r"}$"]))
self.texs.append( "".join([r"$s_{", self.names[i].lstrip(r"$").rstrip(r"$") ,r"}$"]))
def __call__(self, p, **lnpostparams) :
""" Calculate the posterior value given one parameter set `p`. See `lnpostfn_spear_p` for doc.
Parameters
----------
p : array_like
Rmap_Model parameters, [log(sigma), log(tau), lag1, wid1, scale1, ...]
lnpostparams: kwargs
Keyword arguments for `lnpostfn_spear_p`.
Returns
-------
retval: float (set_retq is False) or list (set_retq is True)
if `retval` returns a list, then it contains the full posterior info
as a list of [log_posterior, chi2_component, det_component, DC_penalty, correction_to_the_mean].
"""
return(lnpostfn_spear_p(p, self.zydata, **lnpostparams))
def do_map(self, p_ini, fixed=None, **lnpostparams) :
""" Do an optimization to find the Maximum a Posterior estimates. See `lnpostfn_spear_p` for doc.
Parameters
----------
p_ini: array_like
Rmap_Model parameters, [log(sigma), log(tau), lag1, wid1, scale1, ...]
        fixed: array_like, optional
            Same dimension as p_ini, but with 0 for parameters that are fixed
            in the optimization and 1 for parameters that are varying, e.g.,
            fixed = [0, 1, 1, 1, 1, ...] means sigma is fixed while the others
            vary; fixed = [1, 1, 1, 1, 1, ...] is equivalent to fixed=None
            (default: None).
        lnpostparams : kwargs
            Keyword arguments for `lnpostfn_spear_p`.
Returns
-------
p_bst : array_like
Best-fit parameters.
l: float
The maximum log-posterior.
"""
set_verbose = lnpostparams.pop("set_verbose", True)
set_retq = lnpostparams.pop("set_retq", False)
if set_retq is True :
raise InputError("set_retq has to be False")
p_ini = np.asarray(p_ini)
if fixed is not None :
fixed = np.asarray(fixed)
func = lambda _p : -lnpostfn_spear_p(_p*fixed+p_ini*(1.-fixed),
self.zydata, **lnpostparams)
else :
func = lambda _p : -lnpostfn_spear_p(_p,
self.zydata, **lnpostparams)
p_bst, v_bst = fmin(func, p_ini, full_output=True)[:2]
if fixed is not None :
p_bst = p_bst*fixed+p_ini*(1.-fixed)
sigma, tau, llags, lwids, lscales = unpackspearpar(p_bst,
self.zydata.nlc, hascontlag=False)
if set_verbose :
print("Best-fit parameters are")
print("sigma %8.3f tau %8.3f"%(sigma, tau))
for i in xrange(self.nlc-1) :
ip = 2+i*3
print("%s %8.3f %s %8.3f %s %8.3f"%(
self.vars[ip+0], llags[i],
self.vars[ip+1], lwids[i],
self.vars[ip+2], lscales[i],
))
print("with logp %10.5g "%-v_bst)
return(p_bst, -v_bst)
def do_mcmc(self, conthpd=None, lagtobaseline=0.3, laglimit="baseline", nwalkers=100, nburn=100, nchain=100, threads=1, fburn=None, fchain=None, flogp=None, set_threading=False, blocksize=10000, set_verbose=True):
""" Run MCMC sampling over the parameter space.
Parameters
----------
conthpd: ndarray, optional
Priors on sigma and tau as an ndarray with shape (3, 2),
np.array([[log(sigma_low), log(tau_low)],
[log(sigma_med), log(tau_med)],
[log(sigma_hig), log(tau_hig)]])
where 'low', 'med', and 'hig' are defined as the 68% confidence
limits around the median. conthpd usually comes in as an attribute
of the `Cont_Model` object `hpd` (default: None).
lagtobaseline: float, optional
Prior on lags. When input lag exceeds lagtobaseline*baseline, a
logarithmic prior will be applied.
        laglimit: str or list of tuples, optional
            Hard boundaries for the lag search during MCMC sampling.
            'baseline' means the boundaries are naturally determined by the
            duration of the light curves; otherwise provide a list of `nline`
            tuples, each giving the (min, max) pair for one line
            (default: 'baseline').
        nwalkers : integer, optional
            Number of walkers for `emcee` (default: 100).
        nburn : integer, optional
            Number of burn-in iterations per walker for `emcee` (default: 100).
        nchain : integer, optional
            Number of post-burn-in iterations per walker for `emcee`
            (default: 100).
        threads : integer, optional
            Number of threads (default: 1).
fburn : str, optional
filename for burn-in output (default: None).
fchain : str, optional
filename for MCMC chain output (default: None).
flogp : str, optional
filename for logp output (default: None).
set_threading: bool, optional
            True if you want threading in filling the matrix. It conflicts with
            the 'threads' option in Rmap_Model.do_mcmc (default: False).
blocksize: int, optional
Maximum matrix block size in threading (default: 10000).
set_verbose: bool, optional
Turn on/off verbose mode (default: True).
"""
if (threads > 1 and (not set_threading)):
if set_verbose:
print("run parallel chains of number %2d "%threads)
elif (threads == 1) :
if set_verbose:
if set_threading :
print("run single chain in submatrix blocksize %10d "%blocksize)
else :
print("run single chain without subdividing matrix ")
else :
raise InputError("conflicting set_threading and threads setup")
if laglimit == "baseline" :
laglimit = [[-self.rj, self.rj],]*(self.nlc-1)
elif len(laglimit) != (self.nlc - 1) :
raise InputError("laglimit should be a list of lists matching number of lines")
# generate array of random numbers
p0 = np.random.rand(nwalkers*self.ndim).reshape(nwalkers, self.ndim)
# initialize array
if conthpd is None:
p0[:, 0] += np.log(self.cont_std)-0.5
p0[:, 1] += np.log(np.sqrt(self.rj*self.cont_cad))-0.5
else :
p0[:, 0] += conthpd[1,0]-0.5
p0[:, 1] += conthpd[1,1]-0.5
for i in xrange(self.nlc-1) :
# p0[:, 2+i*3] = p0[:,2+i*3]*self.rj*lagtobaseline
p0[:, 2+i*3] = p0[:,2+i*3]*(laglimit[i][1]-laglimit[i][0]) + laglimit[i][0]
if set_verbose :
print("start burn-in")
if conthpd is None :
print("no priors on sigma and tau")
else :
print("using priors on sigma and tau from the continuum fitting")
print(np.exp(conthpd))
if lagtobaseline < 1.0 :
print("penalize lags longer than %3.2f of the baseline"%lagtobaseline)
else :
print("no penalizing long lags, but only restrict to within the baseline")
print("nburn: %d nwalkers: %d --> number of burn-in iterations: %d"%
(nburn, nwalkers, nburn*nwalkers))
# initialize the ensemble sampler
sampler = EnsembleSampler(nwalkers, self.ndim,
lnpostfn_spear_p,
args=(self.zydata, conthpd, lagtobaseline, laglimit,
set_threading, blocksize, False, False),
threads=threads)
pos, prob, state = sampler.run_mcmc(p0, nburn)
if set_verbose :
print("burn-in finished")
if fburn is not None :
if set_verbose :
print("save burn-in chains to %s"%fburn)
np.savetxt(fburn, sampler.flatchain)
# reset the sampler
sampler.reset()
if set_verbose :
print("start sampling")
sampler.run_mcmc(pos, nchain, rstate0=state)
if set_verbose :
print("sampling finished")
af = sampler.acceptance_fraction
if set_verbose :
print("acceptance fractions are")
print(" ".join([format(r, "3.2f") for r in af]))
if fchain is not None :
if set_verbose :
print("save MCMC chains to %s"%fchain)
np.savetxt(fchain, sampler.flatchain)
if flogp is not None :
if set_verbose :
print("save logp of MCMC chains to %s"%flogp)
np.savetxt(flogp, np.ravel(sampler.lnprobability), fmt='%16.8f')
        # make the chain an attribute
self.flatchain = sampler.flatchain
self.flatchain_whole = np.copy(self.flatchain)
# get HPD
self.get_hpd(set_verbose=set_verbose)
def get_hpd(self, set_verbose=True):
""" Get the 68% percentile range of each parameter to self.hpd.
Parameters
----------
set_verbose: bool, optional
True if you want verbosity (default: True).
"""
hpd = np.zeros((3, self.ndim))
chain_len = self.flatchain.shape[0]
pct1sig = chain_len*np.array([0.16, 0.50, 0.84])
medlowhig = pct1sig.astype(np.int32)
for i in xrange(self.ndim):
vsort = np.sort(self.flatchain[:,i])
hpd[:,i] = vsort[medlowhig]
if set_verbose :
print("HPD of %s"%self.vars[i])
if i < 2 :
print("low: %8.3f med %8.3f hig %8.3f"%tuple(np.exp(hpd[:,i])))
else :
print("low: %8.3f med %8.3f hig %8.3f"%tuple(hpd[:,i]))
# register hpd to attr
self.hpd = hpd
def show_hist(self, bins=100, lagbinsize=1.0, figout=None, figext=None):
""" Display histograms of the posterior distributions.
Parameters
----------
        bins: integer, optional
            Number of bins for parameters other than 'lag' (default: 100).
        lagbinsize: float, optional
            Bin width for the 'lag' histograms (default: 1.0).
figout: str, optional
            Output figure name (default: None, i.e., using sequential integers).
figext: str, optional
Output figure extension (default: None, i.e., using `show`).
"""
if not hasattr(self, "flatchain"):
print("Warning: need to run do_mcmc or load_chain first")
return(1)
ln10 = np.log(10.0)
fig = plt.figure(figsize=(14, 2.8*self.nlc))
for i in xrange(2) :
ax = fig.add_subplot(self.nlc,3,i+1)
ax.hist(self.flatchain[:,i]/ln10, bins)
ax.set_xlabel(self.texs[i])
ax.set_ylabel("N")
for k in xrange(self.nlc-1):
for i in xrange(2+k*3, 5+k*3) :
ax = fig.add_subplot(self.nlc,3,i+1+1)
if np.mod(i, 3) == 2 :
# lag plots
lagbins = np.arange(int(np.min(self.flatchain[:,i])),
int(np.max(self.flatchain[:,i]))+lagbinsize, lagbinsize)
ax.hist(self.flatchain[:,i], bins=lagbins)
else :
ax.hist(self.flatchain[:,i], bins)
ax.set_xlabel(self.texs[i])
ax.set_ylabel("N")
return(figure_handler(fig=fig, figout=figout, figext=figext))
def break_chain(self, llag_segments):
""" Break the chain.
Parameters
----------
llag_segments: list of lists
            list of length self.nlc-1, with each element a two-element array
bracketing the range of lags (usually the single most probable peak)
you want to consider for each line.
"""
if (len(llag_segments) != self.nlc-1) :
print("Error: llag_segments has to be a list of length %d"%(self.nlc-1))
return(1)
if not hasattr(self, "flatchain"):
print("Warning: need to run do_mcmc or load_chain first")
return(1)
for i, llag_seq in enumerate(llag_segments) :
if llag_seq is None:
continue
indx = np.argsort(self.flatchain[:, 2+i*3])
imin, imax = np.searchsorted(self.flatchain[indx, 2+i*3], llag_seq)
indx_cut = indx[imin : imax]
self.flatchain = self.flatchain[indx_cut, :]
def restore_chain(self) :
""" Restore chain after `break_chain`.
"""
self.flatchain = np.copy(self.flatchain_whole)
def load_chain(self, fchain, set_verbose=True):
""" Load stored MCMC chain.
Parameters
----------
fchain: string
Name for the chain file.
set_verbose: bool, optional
True if you want verbosity (default: True).
"""
if set_verbose :
print("load MCMC chain from %s"%fchain)
self.flatchain = np.genfromtxt(fchain)
self.flatchain_whole = np.copy(self.flatchain)
self.ndim = self.flatchain.shape[1]
# get HPD
self.get_hpd(set_verbose=set_verbose)
def get_qlist(self, p_bst):
""" get the best-fit linear responses.
Parameters
----------
p_bst: list
best-fit parameters.
"""
self.qlist = lnpostfn_spear_p(p_bst, self.zydata, set_retq=True, set_verbose=False)[4]
def do_pred(self, p_bst, fpred=None, dense=10, set_overwrite=True) :
""" Calculate the predicted mean and variance of each light curve on a
densely sampled time axis.
Parameters
----------
p_bst: array_like
            Input parameters.
fpred: string, optional
Name of the output file for the predicted light curves, set it to
None if you do not want output (default: None).
dense: int, optional
The factor by which the predicted light curves should be more
densely sampled than the original data (default: 10).
set_overwrite: bool, optional
True if you want to overwrite existing fpred (default: True).
Returns
-------
zydata_pred: LightCurve object
Predicted light curves packaged as a LightCurve object.
"""
self.get_qlist(p_bst)
sigma, tau, lags, wids, scales = unpackspearpar(p_bst,
self.zydata.nlc, hascontlag=True)
# update qlist
self.zydata.update_qlist(self.qlist)
# initialize PredictRmap object
P = PredictRmap(zydata=self.zydata, sigma=sigma, tau=tau,
lags=lags, wids=wids, scales=scales)
nwant = dense*self.cont_npt
jwant0 = self.jstart - 0.1*self.rj
jwant1 = self.jend + 0.1*self.rj
jwant = np.linspace(jwant0, jwant1, nwant)
zylclist_pred = []
for i in xrange(self.nlc) :
iwant = np.ones(nwant)*(i+1)
mve, var = P.mve_var(jwant, iwant)
sig = np.sqrt(var)
zylclist_pred.append([jwant, mve, sig])
zydata_pred = LightCurve(zylclist_pred)
if fpred is not None :
zydata_pred.save(fpred, set_overwrite=set_overwrite)
return(zydata_pred)
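# Illustrative usage sketch (not part of the original API): a typical
# Rmap_Model analysis chained end to end, assuming `zydata` is a LightCurve
# object holding the continuum plus at least one line, and `conthpd` is the
# `hpd` attribute of a previously fitted continuum model. The chain file name
# and the lag window passed to `break_chain` are placeholders.
def _example_rmap_run(zydata, conthpd, fchain="rmap_chain.dat"):
    rmap = Rmap_Model(zydata)
    rmap.do_mcmc(conthpd=conthpd, lagtobaseline=0.3, laglimit="baseline",
                 nwalkers=100, nburn=100, nchain=100, fchain=fchain)
    # optionally isolate the most probable lag peak before the MAP refinement
    rmap.break_chain([[0.0, 0.5 * zydata.rj], ] * (zydata.nlc - 1))
    rmap.get_hpd()
    p_bst, logp_bst = rmap.do_map(rmap.hpd[1, :], set_verbose=True)
    return rmap.do_pred(p_bst)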
###########################################################
""" Pmap_Model: Two-Band Spectroscopic RM """
def unpackphotopar(p, nlc=2, hascontlag=False) :
""" Unpack the physical parameters from input 1-d array for photo mode.
    Currently only two bands, one on and one off the line emission.
"""
if nlc != 2 :
raise InputError("Pmap_Model cannot cope with more than two bands yet")
sigma = np.exp(p[0])
tau = np.exp(p[1])
if hascontlag :
lags = np.zeros(3)
wids = np.zeros(3)
scales = np.ones(3)
# line contribution
lags[1] = p[2]
wids[1] = p[3]
scales[1] = p[4]
# continuum contribution
scales[2] = p[5]
return(sigma, tau, lags, wids, scales)
else :
llags = np.zeros(2)
lwids = np.zeros(2)
lscales = np.ones(2)
llags[0] = p[2]
lwids[0] = p[3]
lscales[0] = p[4]
# continuum contribution
lscales[1] = p[5]
return(sigma, tau, llags, lwids, lscales)
def lnpostfn_photo_p(p, zydata, conthpd=None, set_extraprior=False, lagtobaseline=0.3, laglimit=None, widtobaseline=0.2, widlimit=None, set_threading=False, blocksize=10000, set_retq=False, set_verbose=False):
""" log-posterior function of p.
Parameters
----------
p : array_like
Pmap_Model parameters, [log(sigma), log(tau), lag1, wid1, scale1, alpha]
zydata: LightCurve object
Light curve data.
conthpd: ndarray, optional
Priors on sigma and tau as an ndarray with shape (3, 2),
np.array([[log(sigma_low), log(tau_low)],
[log(sigma_med), log(tau_med)],
[log(sigma_hig), log(tau_hig)]])
where 'low', 'med', and 'hig' are defined as the 68% confidence
limits around the median. conthpd usually comes in as an attribute
of the DRW_Model object DRW_Model.hpd (default: None).
set_extraprior: bool, optional
        DEPRECATED; kept for backward compatibility and debugging purposes.
lagtobaseline: float, optional
Prior on lags. When input lag exceeds lagtobaseline*baseline, a
logarithmic prior will be applied.
    laglimit: list of tuples, optional
        Hard boundaries for the lag search (default: None).
    widtobaseline: float, optional
        Prior on wids. When the input wid exceeds widtobaseline*baseline, a
        logarithmic prior will be applied.
    widlimit: list of tuples, optional
        Hard boundaries for the wid search (default: None).
set_threading: bool, optional
        True if you want threading in filling the matrix. It conflicts with
        the 'threads' option in Pmap_Model.do_mcmc (default: False).
blocksize: int, optional
Maximum matrix block size in threading (default: 10000).
set_retq: bool, optional
Return the value(s) of q along with each component of the
log-likelihood if True (default: False).
set_verbose: bool, optional
True if you want verbosity (default: False).
"""
# unpack the parameters from p
sigma, tau, llags, lwids, lscales = unpackphotopar(p, zydata.nlc, hascontlag=False)
if set_retq :
vals = list(lnlikefn_photo(zydata, sigma, tau, llags, lwids, lscales,
set_retq=True, set_verbose=set_verbose,
set_threading=set_threading, blocksize=blocksize))
else :
logl = lnlikefn_photo(zydata, sigma, tau, llags, lwids, lscales,
set_retq=False, set_verbose=set_verbose,
set_threading=set_threading, blocksize=blocksize)
# conthpd is in natural log
if conthpd is not None :
# for sigma
if p[0] < conthpd[1,0] :
prior0 = (p[0] - conthpd[1,0])/(conthpd[1,0]-conthpd[0,0])
else :
prior0 = (p[0] - conthpd[1,0])/(conthpd[2,0]-conthpd[1,0])
# for tau
if p[1] < conthpd[1,1] :
prior1 = (p[1] - conthpd[1,1])/(conthpd[1,1]-conthpd[0,1])
else :
prior1 = (p[1] - conthpd[1,1])/(conthpd[2,1]-conthpd[1,1])
else :
prior0 = 0.0
prior1 = 0.0
# for each lag
prior2 = 0.0
if lagtobaseline < 1.0 :
if np.abs(llags[0]) > lagtobaseline*zydata.rj :
            # penalize lags longer than lagtobaseline times the baseline
            # (0.3 by default), as it is otherwise too easy to fit the
            # model with non-overlapping signals in the light curves.
prior2 += np.log(np.abs(llags[0])/(lagtobaseline*zydata.rj))
# penalize long lags to be impossible
if laglimit is not None :
if llags[0] > laglimit[0][1] or llags[0] < laglimit[0][0] :
prior2 += my_pos_inf
# penalize on extremely large transfer function width
if widtobaseline < 1.0 :
if np.abs(lwids[0]) > widtobaseline*zydata.rj :
prior2 += np.log(np.abs(lwids[0])/(widtobaseline*zydata.rj))
if widlimit is not None :
if lwids[0] > widlimit[0][1] or lwids[0] < widlimit[0][0] :
prior2 += my_pos_inf
# if np.abs(lwids[0]) >= zydata.cont_cad :
# prior2 += np.log(np.abs(lwids[0])/zydata.cont_cad)
# else :
# prior2 += np.log(zydata.cont_cad/np.abs(lwids[0]))
if set_extraprior :
# XXX {{{Extra penalizations.
# penalize on extremely short lags (below median cadence).
if np.abs(llags[0]) <= zydata.cont_cad or np.abs(llags[0]) <= np.abs(lwids[0]) :
prior2 += my_pos_inf
# penalize on extremely small line responses (below mean error level).
if sigma * np.abs(lscales[0]) <= np.mean(zydata.elist[1]) :
prior2 += my_pos_inf
# }}}
# add logp of all the priors
prior = -0.5*(prior0*prior0+prior1*prior1) - prior2
# print p
# print prior
if set_retq :
vals[0] = vals[0] + prior
vals.extend([prior0, prior1, prior2])
return(vals)
else :
logp = logl + prior
return(logp)
def lnlikefn_photo(zydata, sigma, tau, llags, lwids, lscales, set_retq=False, set_verbose=False, set_threading=False, blocksize=10000):
""" Log-likelihood function.
"""
if zydata.issingle:
raise UsageError("lnlikefn_photo does not work for single mode")
# impossible scenarios
if (sigma<=0.0 or tau<=0.0 or np.min(lwids)<0.0 or np.min(lscales)<0.0
or np.max(np.abs(llags))>zydata.rj) :
return(_exit_with_retval(zydata.nlc, set_retq,
errmsg="Warning: illegal input of parameters",
set_verbose=set_verbose))
# set_pmap = True
# fill in lags/wids/scales
lags = np.zeros(3)
wids = np.zeros(3)
scales = np.ones(3)
lags[1 :] = llags[:]
wids[1 :] = lwids[:]
scales[1 :] = lscales[:]
if set_threading :
C = spear_threading(zydata.jarr,zydata.jarr,
zydata.iarr,zydata.iarr,sigma,tau,lags,wids,scales,
set_pmap=True, blocksize=blocksize)
else :
C = spear(zydata.jarr,zydata.jarr,
zydata.iarr,zydata.iarr,sigma,tau,lags,wids,scales,
set_pmap=True)
# decompose C inplace
U, info = cholesky(C, nugget=zydata.varr, inplace=True, raiseinfo=False)
# handle exceptions here
if info > 0 :
return(_exit_with_retval(zydata.nlc, set_retq,
errmsg="Warning: non positive-definite covariance C",
set_verbose=set_verbose))
retval = _lnlike_from_U(U, zydata, set_retq=set_retq, set_verbose=set_verbose)
return(retval)
class Pmap_Model(object) :
def __init__(self, zydata=None, linename="line") :
""" Pmap Model object.
Parameters
----------
zydata: LightCurve object, optional
Light curve data.
linename: str, optional
Name of the emission line (default: 'line').
"""
self.zydata = zydata
if zydata is None :
pass
else :
self.nlc = zydata.nlc
self.npt = zydata.npt
self.cont_npt = zydata.nptlist[0]
self.cont_cad = zydata.cont_cad
self.cont_std = zydata.cont_std
self.rj = zydata.rj
self.jstart = zydata.jstart
self.jend = zydata.jend
self.names = zydata.names
# number of parameters
self.ndim = 6
self.vars = [ "sigma", "tau" ]
self.texs = [r"$\log\,\sigma$", r"$\log\,\tau$"]
#
self.vars.append("_".join(["lag", linename]))
self.vars.append("_".join(["wid", linename]))
self.vars.append("_".join(["scale", linename]))
self.texs.append( "".join([r"$t_{", linename ,r"}$"]))
self.texs.append( "".join([r"$w_{", linename ,r"}$"]))
self.texs.append( "".join([r"$s_{", linename ,r"}$"]))
#
self.vars.append("alpha")
self.texs.append(r"$\alpha$")
def __call__(self, p, **lnpostparams) :
""" Calculate the posterior value given one parameter set `p`.
Parameters
----------
p : array_like
Pmap_Model parameters, [log(sigma), log(tau), lag, wid, scale, alpha].
lnpostparams: kwargs
            Keyword arguments for `lnpostfn_photo_p`.
Returns
-------
retval: float (set_retq is False) or list (set_retq is True)
if `retval` returns a list, then it contains the full posterior info
as a list of [log_posterior, chi2_component, det_component, DC_penalty, correction_to_the_mean].
"""
return(lnpostfn_photo_p(p, self.zydata, **lnpostparams))
def do_map(self, p_ini, fixed=None, **lnpostparams) :
""" Do an optimization to find the Maximum a Posterior estimates.
Parameters
----------
p_ini: array_like
Pmap_Model parameters [log(sigma), log(tau), lag, wid, scale, alpha].
        fixed: array_like, optional
            Same dimension as p_ini, but with 0 for parameters that are fixed
            in the optimization and 1 for parameters that are varying, e.g.,
            fixed = [0, 1, 1, 1, 1, 1] means sigma is fixed while the others
            vary; fixed = [1, 1, 1, 1, 1, 1] is equivalent to fixed=None
            (default: None).
Returns
-------
p_bst : array_like
Best-fit parameters.
l: float
The maximum log-posterior.
"""
set_verbose = lnpostparams.pop("set_verbose", True)
set_retq = lnpostparams.pop("set_retq", False)
if set_retq is True :
raise InputError("set_retq has to be False")
p_ini = np.asarray(p_ini)
if fixed is not None :
fixed = np.asarray(fixed)
func = lambda _p : -lnpostfn_photo_p(_p*fixed+p_ini*(1.-fixed),
self.zydata, **lnpostparams)
else :
func = lambda _p : -lnpostfn_photo_p(_p,
self.zydata, **lnpostparams)
p_bst, v_bst = fmin(func, p_ini, full_output=True)[:2]
if fixed is not None :
p_bst = p_bst*fixed+p_ini*(1.-fixed)
sigma, tau, llags, lwids, lscales = unpackphotopar(p_bst, self.zydata.nlc, hascontlag=False)
if set_verbose :
print("Best-fit parameters are")
print("sigma %8.3f tau %8.3f"%(sigma, tau))
print("%s %8.3f %s %8.3f %s %8.3f"%(
self.vars[2], llags[0],
self.vars[3], lwids[0],
self.vars[4], lscales[0],
))
print("alpha %8.3f"%(lscales[1]))
print("with logp %10.5g "%-v_bst)
return(p_bst, -v_bst)
def do_mcmc(self, conthpd=None, set_extraprior=False, lagtobaseline=0.3, laglimit="baseline", widtobaseline=0.2, widlimit="nyquist", nwalkers=100, nburn=100, nchain=100, threads=1, fburn=None, fchain=None, flogp=None, set_threading=False, blocksize=10000, set_verbose=True):
""" See `lnpostfn_photo_p` for doc, except for `laglimit` and `widlimit`,
both of which have different default values ('baseline' / 'nyquist').
'baseline' means the boundaries are naturally determined by the
duration of the light curves, and 'nyquist' means the transfer function
width has to be within two times the typical cadence of light curves.
"""
if (threads > 1 and (not set_threading)):
if set_verbose:
print("run parallel chains of number %2d "%threads)
elif (threads == 1) :
if set_verbose:
if set_threading :
print("run single chain in submatrix blocksize %10d "%blocksize)
else :
print("run single chain without subdividing matrix ")
else :
raise InputError("conflicting set_threading and threads setup")
if laglimit == "baseline" :
laglimit = [[-self.rj, self.rj],]
elif len(laglimit) != 1 :
raise InputError("laglimit should be a list of a single list")
if widlimit == "nyquist" :
# two times the cadence, resembling Nyquist sampling.
widlimit = [[0.0, 2.0*self.cont_cad],]
elif len(widlimit) != 1 :
raise InputError("widlimit should be a list of a single list")
# generate array of random numbers
p0 = np.random.rand(nwalkers*self.ndim).reshape(nwalkers, self.ndim)
# initialize array
if conthpd is None:
p0[:, 0] += np.log(self.cont_std)-0.5
p0[:, 1] += np.log(np.sqrt(self.rj*self.cont_cad))-0.5
else :
# XXX stretch the range from (0,1) to ( conthpd[0,0], conthpd[2,0] )
p0[:, 0] = p0[:, 0] * (conthpd[2,0] - conthpd[0,0]) + conthpd[0,0]
p0[:, 1] = p0[:, 1] * (conthpd[2,1] - conthpd[0,1]) + conthpd[0,1]
# old way, just use 0.5 as the 1\sigma width.
# p0[:, 0] += conthpd[1,0]-0.5
# p0[:, 1] += conthpd[1,1]-0.5
p0[:, 2] = p0[:,2]*(laglimit[0][1]-laglimit[0][0]) + laglimit[0][0]
p0[:, 3] = p0[:,3]*(widlimit[0][1]-widlimit[0][0]) + widlimit[0][0]
if set_verbose :
print("start burn-in")
if conthpd is None :
print("no priors on sigma and tau")
else :
print("using priors on sigma and tau from the continuum fitting")
print(np.exp(conthpd))
if lagtobaseline < 1.0 :
print("penalize lags longer than %3.2f of the baseline"%lagtobaseline)
else :
print("no penalizing long lags, but only restrict to within the baseline")
print("nburn: %d nwalkers: %d --> number of burn-in iterations: %d"%
(nburn, nwalkers, nburn*nwalkers))
# initialize the ensemble sampler
sampler = EnsembleSampler(nwalkers, self.ndim,
lnpostfn_photo_p,
args=(self.zydata, conthpd, set_extraprior,
lagtobaseline, laglimit, widtobaseline, widlimit,
set_threading, blocksize, False, False),
threads=threads)
pos, prob, state = sampler.run_mcmc(p0, nburn)
if set_verbose :
print("burn-in finished")
if fburn is not None :
if set_verbose :
print("save burn-in chains to %s"%fburn)
np.savetxt(fburn, sampler.flatchain)
# reset the sampler
sampler.reset()
if set_verbose :
print("start sampling")
sampler.run_mcmc(pos, nchain, rstate0=state)
if set_verbose :
print("sampling finished")
af = sampler.acceptance_fraction
if set_verbose :
print("acceptance fractions are")
print(" ".join([format(r, "3.2f") for r in af]))
if fchain is not None :
if set_verbose :
print("save MCMC chains to %s"%fchain)
np.savetxt(fchain, sampler.flatchain)
if flogp is not None :
if set_verbose :
print("save logp of MCMC chains to %s"%flogp)
np.savetxt(flogp, np.ravel(sampler.lnprobability), fmt='%16.8f')
        # make the chain an attribute
self.flatchain = sampler.flatchain
self.flatchain_whole = np.copy(self.flatchain)
# get HPD
self.get_hpd(set_verbose=set_verbose)
def get_hpd(self, set_verbose=True):
""" Get the 68% percentile range of each parameter to self.hpd.
Parameters
----------
set_verbose: bool, optional
True if you want verbosity (default: True).
"""
hpd = np.zeros((3, self.ndim))
chain_len = self.flatchain.shape[0]
pct1sig = chain_len*np.array([0.16, 0.50, 0.84])
medlowhig = pct1sig.astype(np.int32)
for i in xrange(self.ndim):
vsort = np.sort(self.flatchain[:,i])
hpd[:,i] = vsort[medlowhig]
if set_verbose :
print("HPD of %s"%self.vars[i])
if i < 2 :
print("low: %8.3f med %8.3f hig %8.3f"%tuple(np.exp(hpd[:,i])))
else :
print("low: %8.3f med %8.3f hig %8.3f"%tuple(hpd[:,i]))
# register hpd to attr
self.hpd = hpd
def show_hist(self, bins=100, lagbinsize=1.0, figout=None, figext=None):
""" Display histograms of the posterior distributions.
Parameters
----------
        bins: integer, optional
            Number of bins for parameters other than 'lag' (default: 100).
        lagbinsize: float, optional
            Bin width for the 'lag' histograms (default: 1.0).
figout: str, optional
            Output figure name (default: None, i.e., using sequential integers).
figext: str, optional
Output figure extension (default: None, i.e., using `show`).
"""
if not hasattr(self, "flatchain"):
print("Warning: need to run do_mcmc or load_chain first")
return(1)
ln10 = np.log(10.0)
fig = plt.figure(figsize=(14, 2.8*self.nlc))
for i in xrange(2) :
ax = fig.add_subplot(self.nlc,3,i+1)
ax.hist(self.flatchain[:,i]/ln10, bins)
ax.set_xlabel(self.texs[i])
ax.set_ylabel("N")
# alpha
ax = fig.add_subplot(self.nlc,3,3)
ax.hist(self.flatchain[:,5], bins)
ax.set_xlabel(self.texs[5])
ax.set_ylabel("N")
# line
for i in xrange(2, 5) :
ax = fig.add_subplot(self.nlc,3,i+1+1)
if np.mod(i, 3) == 2 :
# lag plots
lagbins = np.arange(int(np.min(self.flatchain[:,i])),
int(np.max(self.flatchain[:,i]))+lagbinsize, lagbinsize)
ax.hist(self.flatchain[:,i], bins=lagbins)
else :
ax.hist(self.flatchain[:,i], bins)
ax.set_xlabel(self.texs[i])
ax.set_ylabel("N")
# plt.get_current_fig_manager().toolbar.zoom()
return(figure_handler(fig=fig, figout=figout, figext=figext))
def break_chain(self, llag_segments):
""" Break the chain.
Parameters
----------
llag_segments: list of lists
            list of length 1, with the single element a two-element array
bracketing the range of lags (usually the single most probable peak)
you want to consider for each line.
"""
if (len(llag_segments) != self.nlc-1) :
print("Error: llag_segments has to be a list of length %d"%(self.nlc-1))
return(1)
if not hasattr(self, "flatchain"):
print("Warning: need to run do_mcmc or load_chain first")
return(1)
llag_seq = llag_segments[0]
if llag_seq is None:
print("Warning: no rule to break chains with")
else :
indx = np.argsort(self.flatchain[:, 2])
imin, imax = np.searchsorted(self.flatchain[indx, 2], llag_seq)
indx_cut = indx[imin : imax]
self.flatchain = self.flatchain[indx_cut, :]
def restore_chain(self) :
""" Restore chain after `break_chain`.
"""
self.flatchain = np.copy(self.flatchain_whole)
def load_chain(self, fchain, set_verbose=True):
""" Load stored MCMC chain.
Parameters
----------
fchain: string
Name for the chain file.
set_verbose: bool, optional
True if you want verbosity (default: True).
"""
if set_verbose :
print("load MCMC chain from %s"%fchain)
self.flatchain = np.genfromtxt(fchain)
self.flatchain_whole = np.copy(self.flatchain)
self.ndim = self.flatchain.shape[1]
# get HPD
self.get_hpd(set_verbose=set_verbose)
def do_pred(self, p_bst, fpred=None, dense=10, set_overwrite=True, set_decompose=False) :
""" Calculate the predicted mean and variance of each light curve on a
densely sampled time axis.
Parameters
----------
p_bst: array_like
            Input parameters.
fpred: string, optional
Name of the output file for the predicted light curves, set it to
None if you do not want output (default: None).
dense: int, optional
The factor by which the predicted light curves should be more
densely sampled than the original data (default: 10).
set_overwrite: bool, optional
True if you want to overwrite existing fpred (default: True).
Returns
-------
zydata_pred: LightCurve object
Predicted light curves packaged as a LightCurve object.
"""
qlist = lnpostfn_photo_p(p_bst, self.zydata, set_retq=True, set_verbose=False)[4]
sigma, tau, lags, wids, scales = unpackphotopar(p_bst,
self.zydata.nlc, hascontlag=True)
# update qlist
self.zydata.update_qlist(qlist)
# initialize PredictRmap object
P = PredictPmap(zydata=self.zydata, sigma=sigma, tau=tau,
lags=lags, wids=wids, scales=scales)
nwant = dense*self.cont_npt
jwant0 = self.jstart - 0.1*self.rj
jwant1 = self.jend + 0.1*self.rj
jwant = np.linspace(jwant0, jwant1, nwant)
zylclist_pred = []
for i in xrange(self.nlc) :
iwant = np.ones(nwant)*(i+1)
mve, var = P.mve_var(jwant, iwant)
sig = np.sqrt(var)
zylclist_pred.append([jwant, mve, sig])
if set_decompose :
mve_band = ( zylclist_pred[0][1] - self.zydata.blist[0] ) *scales[-1]
mve_line = ( zylclist_pred[1][1] - self.zydata.blist[1] ) - mve_band
mve_nonv = jwant * 0.0 + self.zydata.blist[1]
zydata_pred = LightCurve(zylclist_pred)
if fpred is not None :
zydata_pred.save(fpred, set_overwrite=set_overwrite)
if set_decompose :
return(zydata_pred, [jwant, mve_band, mve_line, mve_nonv])
else :
return(zydata_pred)
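# Illustrative usage sketch (not part of the original API): a two-band
# Pmap_Model run, assuming `zydata` is a LightCurve object holding the two
# photometric bands in the order expected by the model and `conthpd` comes
# from a continuum-only fit. The chain file name is a placeholder.
def _example_pmap_run(zydata, conthpd, fchain="pmap_chain.dat"):
    pmap = Pmap_Model(zydata)
    pmap.do_mcmc(conthpd=conthpd, lagtobaseline=0.3, laglimit="baseline",
                 widtobaseline=0.2, widlimit="nyquist",
                 nwalkers=100, nburn=100, nchain=100, fchain=fchain)
    p_bst, logp_bst = pmap.do_map(pmap.hpd[1, :], set_verbose=True)
    # set_decompose=True would additionally return the band/line decomposition
    return pmap.do_pred(p_bst, set_decompose=False)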
###########################################################
""" SPmap_Model: One-Band Photometric RM """
def unpacksbphotopar(p, nlc=1) :
""" Unpack the physical parameters from input 1-d array for single band photo mode.
"""
if nlc != 1 :
raise InputError("SPmap_Model cannot cope with more than one band.")
sigma = np.exp(p[0])
tau = np.exp(p[1])
lag = p[2]
wid = p[3]
scale = p[4]
return(sigma, tau, lag, wid, scale)
def lnpostfn_sbphoto_p(p, zydata, conthpd=None, scalehpd=None, lagtobaseline=0.3, laglimit=None, widtobaseline=0.2, widlimit=None, set_threading=False, blocksize=10000, set_retq=False, set_verbose=False) :
""" log-posterior function of p.
Parameters
----------
p : array_like
SPmap_Model parameters, [log(sigma), log(tau), lag1, wid1, scale1]
zydata: LightCurve object
Light curve data.
conthpd: ndarray, optional
Priors on sigma and tau as an ndarray with shape (3, 2),
np.array([[log(sigma_low), log(tau_low)],
[log(sigma_med), log(tau_med)],
[log(sigma_hig), log(tau_hig)]])
where 'low', 'med', and 'hig' are defined as the 68% confidence
limits around the median. conthpd usually comes in as an attribute
of the `Cont_Model` object `hpd` (default: None).
scalehpd: ndarray, optional
Prior on ln(scale) as an 1D ndarray with size 3.
np.array([lnscale_low, lnscale_med, lnscale_hig])
where 'low', 'med', and 'hig' are defined as the 68% confidence
limits around the median. Use scalehpd if you have a rough idea of
how large the ratio of line variation over the underlying continuum is.
lagtobaseline: float, optional
Prior on lags. When input lag exceeds lagtobaseline*baseline, a
logarithmic prior will be applied.
    laglimit: list of tuples, optional
        Hard boundaries for the lag search (default: None).
    widtobaseline: float, optional
        Prior on wids. When the input wid exceeds widtobaseline*baseline, a
        logarithmic prior will be applied.
    widlimit: list of tuples, optional
        Hard boundaries for the wid search (default: None).
set_threading: bool, optional
        True if you want threading in filling the matrix. It conflicts with
        the 'threads' option in SPmap_Model.do_mcmc (default: False).
blocksize: int, optional
Maximum matrix block size in threading (default: 10000).
set_retq: bool, optional
Return the value(s) of q along with each component of the
log-likelihood if True (default: False).
set_verbose: bool, optional
True if you want verbosity (default: False).
"""
sigma, tau, lag, wid, scale = unpacksbphotopar(p, zydata.nlc)
if set_retq :
vals = list(lnlikefn_sbphoto(zydata, sigma, tau, lag, wid, scale,
set_retq=True, set_verbose=set_verbose,
set_threading=set_threading, blocksize=blocksize))
else :
logl = lnlikefn_sbphoto(zydata, sigma, tau, lag, wid, scale,
set_retq=False, set_verbose=set_verbose,
set_threading=set_threading, blocksize=blocksize)
# both conthpd and p[1-2] are in natural log
if conthpd is not None :
# for sigma
if p[0] < conthpd[1,0] :
prior0 = (p[0] - conthpd[1,0])/(conthpd[1,0]-conthpd[0,0])
else :
prior0 = (p[0] - conthpd[1,0])/(conthpd[2,0]-conthpd[1,0])
# for tau
if p[1] < conthpd[1,1] :
prior1 = (p[1] - conthpd[1,1])/(conthpd[1,1]-conthpd[0,1])
else :
prior1 = (p[1] - conthpd[1,1])/(conthpd[2,1]-conthpd[1,1])
else :
prior0 = 0.0
prior1 = 0.0
# for scale
if scalehpd is not None :
lnscale = np.log(scale)
if lnscale < scalehpd[1] :
prior3 = (lnscale - scalehpd[1])/(scalehpd[1]-scalehpd[0])
else :
prior3 = (lnscale - scalehpd[1])/(scalehpd[2]-scalehpd[1])
else :
prior3 = 0.0
# for lags and wids
prior2 = 0.0
# penalize on extremely long lags.
if lagtobaseline < 1.0 :
if np.abs(lag) > lagtobaseline*zydata.rj :
prior2 += np.log(np.abs(lag)/(lagtobaseline*zydata.rj))
# penalize long lags to be impossible
if laglimit is not None :
if lag > laglimit[0][1] or lag < laglimit[0][0] :
prior2 += my_pos_inf
# penalize on extremely large transfer function width
if widtobaseline < 1.0 :
        if np.abs(wid) > widtobaseline*zydata.rj :
            prior2 += np.log(np.abs(wid)/(widtobaseline*zydata.rj))
if widlimit is not None :
if wid > widlimit[0][1] or wid < widlimit[0][0] :
prior2 += my_pos_inf
# add logp of all the priors
prior = -0.5*(prior0*prior0+prior1*prior1+prior3*prior3) - prior2
if set_retq :
vals[0] = vals[0] + prior
vals.extend([prior0, prior1, prior2])
return(vals)
else :
logp = logl + prior
return(logp)
def lnlikefn_sbphoto(zydata, sigma, tau, lag, wid, scale, set_retq=False, set_verbose=False, set_threading=False, blocksize=10000):
""" Log-likelihood function for the SBmap model.
"""
if not zydata.issingle:
raise UsageError("lnlikefn_sbphoto expects a single input light curve.")
# impossible scenarios
if (sigma<=0.0 or tau<=0.0 or wid<0.0 or scale<0.0 or lag>zydata.rj) :
return(_exit_with_retval(zydata.nlc, set_retq, errmsg="Warning: illegal input of parameters", set_verbose=set_verbose))
# fill in lags/wids/scales so that we can use spear.py with set_pmap=True.
lags = np.zeros(3)
wids = np.zeros(3)
scales = np.ones(3)
lags[1] = lag
wids[1] = wid
scales[1] = scale
# we know all elements in zydata.iarr are 1, so we want them to be 2 here.
if set_threading :
C = spear_threading(zydata.jarr,zydata.jarr,
zydata.iarr+1,zydata.iarr+1,sigma,tau,lags,wids,scales,
set_pmap=True, blocksize=blocksize)
else :
C = spear(zydata.jarr,zydata.jarr,
zydata.iarr+1,zydata.iarr+1,sigma,tau,lags,wids,scales,
set_pmap=True)
# decompose C inplace
U, info = cholesky(C, nugget=zydata.varr, inplace=True, raiseinfo=False)
# handle exceptions here
if info > 0 :
return(_exit_with_retval(zydata.nlc, set_retq,
errmsg="Warning: non positive-definite covariance C",
set_verbose=set_verbose))
retval = _lnlike_from_U(U, zydata, set_retq=set_retq, set_verbose=set_verbose)
return(retval)
class SPmap_Model(object) :
def __init__(self, zydata=None, linename="line") :
""" SPmap Model object (Single-band Photometric mapping).
Parameters
----------
zydata: LightCurve object, optional
Light curve data.
"""
self.zydata = zydata
if zydata is None :
pass
else :
self.nlc = zydata.nlc
self.npt = zydata.npt
self.cont_npt = zydata.nptlist[0]
self.cont_cad = zydata.cont_cad
self.cont_std = zydata.cont_std
self.rj = zydata.rj
self.jstart = zydata.jstart
self.jend = zydata.jend
self.names = zydata.names
# test if all elements in zydata.iarr are one.
if not np.all(zydata.iarr == 1) :
raise UsageError("Element ids in zydata should all be ones.")
# number of parameters
self.ndim = 5
self.vars = [ "sigma", "tau" ]
self.texs = [r"$\log\,\sigma$", r"$\log\,\tau$"]
#
self.vars.append("_".join(["lag", linename]))
self.vars.append("_".join(["wid", linename]))
self.vars.append("_".join(["scale", linename]))
self.texs.append( "".join([r"$t_{", linename ,r"}$"]))
self.texs.append( "".join([r"$w_{", linename ,r"}$"]))
self.texs.append( "".join([r"$s_{", linename ,r"}$"]))
def __call__(self, p, **lnpostparams) :
return(lnpostfn_sbphoto_p(p, self.zydata, **lnpostparams))
def do_map(self, p_ini, fixed=None, **lnpostparams) :
""" Do an optimization to find the Maximum a Posterior estimates.
Parameters
----------
p_ini: array_like
            SPmap_Model parameters [log(sigma), log(tau), lag, wid, scale].
        fixed: array_like, optional
            Same dimension as p_ini, but with 0 for parameters that are fixed
            in the optimization and 1 for parameters that are varying, e.g.,
            fixed = [0, 1, 1, 1, 1] means sigma is fixed while the others vary;
            fixed = [1, 1, 1, 1, 1] is equivalent to fixed=None (default: None).
Returns
-------
p_bst : array_like
Best-fit parameters.
l: float
The maximum log-posterior.
"""
set_verbose = lnpostparams.pop("set_verbose", True) # either given or set to True
set_retq = lnpostparams.pop("set_retq", False)
if set_retq is True :
raise InputError("set_retq has to be False")
p_ini = np.asarray(p_ini)
if fixed is not None :
fixed = np.asarray(fixed)
func = lambda _p : -lnpostfn_sbphoto_p(_p*fixed+p_ini*(1.-fixed), self.zydata, **lnpostparams)
else :
func = lambda _p : -lnpostfn_sbphoto_p(_p, self.zydata, **lnpostparams)
p_bst, v_bst = fmin(func, p_ini, full_output=True)[:2]
if fixed is not None :
p_bst = p_bst*fixed+p_ini*(1.-fixed)
sigma, tau, lag, wid, scale = unpacksbphotopar(p_bst, nlc=self.zydata.nlc)
if set_verbose :
print("Best-fit parameters are")
print("sigma %8.3f tau %8.3f"%(sigma, tau))
print("%s %8.3f %s %8.3f %s %8.3f"%(
self.vars[2], lag,
self.vars[3], wid,
                self.vars[4], scale,
))
print("with logp %10.5g "%-v_bst)
return(p_bst, -v_bst)
def do_mcmc(self, conthpd=None, scalehpd=None, lagtobaseline=0.3, laglimit="baseline", widtobaseline=0.2, widlimit="nyquist", nwalkers=100, nburn=100, nchain=100, threads=1, fburn=None, fchain=None, flogp=None, set_threading=False, blocksize=10000, set_verbose=True):
""" See `lnpostfn_sbphoto_p` for doc, except for `laglimit` and `widlimit`,
both of which have different default values ('baseline' / 'nyquist').
'baseline' means the boundaries are naturally determined by the
duration of the light curves, and 'nyquist' means the transfer function
width has to be within two times the typical cadence of light curves.
"""
if (threads > 1 and (not set_threading)):
if set_verbose:
print("run parallel chains of number %2d "%threads)
elif (threads == 1) :
if set_verbose:
if set_threading :
print("run single chain in submatrix blocksize %10d "%blocksize)
else :
print("run single chain without subdividing matrix ")
else :
raise InputError("conflicting set_threading and threads setup: set_threading should be false when threads > 1")
if laglimit == "baseline" :
laglimit = [[-self.rj, self.rj],]
elif len(laglimit) != 1 :
raise InputError("laglimit should be a list of a single list")
if widlimit == "nyquist" :
# two times the cadence, resembling Nyquist sampling.
widlimit = [[0.0, 2.0*self.cont_cad],]
elif len(widlimit) != 1 :
raise InputError("widlimit should be a list of a single list")
# generate array of random numbers
p0 = np.random.rand(nwalkers*self.ndim).reshape(nwalkers, self.ndim)
# initialize array
if conthpd is None:
p0[:, 0] += np.log(self.cont_std)-0.5
p0[:, 1] += np.log(np.sqrt(self.rj*self.cont_cad))-0.5
else :
# XXX stretch the range from (0,1) to ( conthpd[0,0], conthpd[2,0] )
p0[:, 0] = p0[:, 0] * (conthpd[2,0] - conthpd[0,0]) + conthpd[0,0]
p0[:, 1] = p0[:, 1] * (conthpd[2,1] - conthpd[0,1]) + conthpd[0,1]
# old way, just use 0.5 as the 1\sigma width.
# p0[:, 0] += conthpd[1,0]-0.5
# p0[:, 1] += conthpd[1,1]-0.5
p0[:, 2] = p0[:, 2] * (laglimit[0][1] - laglimit[0][0]) + laglimit[0][0]
p0[:, 3] = p0[:, 3] * (widlimit[0][1] - widlimit[0][0]) + widlimit[0][0]
if scalehpd is None :
pass # (0, 1) is adequate.
else :
# XXX scalehpd is in natural log-space
p0[:, 4] = np.exp(p0[:, 4] * (scalehpd[2] - scalehpd[0]) + scalehpd[0])
if set_verbose :
print("start burn-in")
if conthpd is None :
print("no priors on sigma and tau")
else :
print("using log-priors on sigma and tau from the continuum fitting")
print(np.exp(conthpd))
if lagtobaseline < 1.0 :
print("penalize lags longer than %3.2f of the baseline"%lagtobaseline)
else :
print("no penalizing long lags, but only restrict to within laglimit")
if widtobaseline < 1.0 :
print("penalize wids longer than %3.2f of the baseline"%widtobaseline)
else :
print("no penalizing long wids, but only restrict to within widlimit")
if scalehpd is None :
print("no priors on scale")
else :
print("using log-priors on scale")
print(np.exp(scalehpd))
print("nburn: %d nwalkers: %d --> number of burn-in iterations: %d"% (nburn, nwalkers, nburn*nwalkers))
# initialize the ensemble sampler
sampler = EnsembleSampler(nwalkers, self.ndim, lnpostfn_sbphoto_p,
args=(self.zydata, conthpd, scalehpd,
lagtobaseline, laglimit, widtobaseline, widlimit,
set_threading, blocksize, False, False),
threads=threads)
pos, prob, state = sampler.run_mcmc(p0, nburn)
if set_verbose :
print("burn-in finished")
if fburn is not None :
if set_verbose :
print("save burn-in chains to %s"%fburn)
np.savetxt(fburn, sampler.flatchain)
# reset the sampler
sampler.reset()
if set_verbose :
print("start sampling")
sampler.run_mcmc(pos, nchain, rstate0=state)
if set_verbose :
print("sampling finished")
af = sampler.acceptance_fraction
if set_verbose :
print("acceptance fractions are")
print(" ".join([format(r, "3.2f") for r in af]))
if fchain is not None :
if set_verbose :
print("save MCMC chains to %s"%fchain)
np.savetxt(fchain, sampler.flatchain)
if flogp is not None :
if set_verbose :
print("save logp of MCMC chains to %s"%flogp)
np.savetxt(flogp, np.ravel(sampler.lnprobability), fmt='%16.8f')
        # make the chain an attribute
self.flatchain = sampler.flatchain
self.flatchain_whole = np.copy(self.flatchain)
# get HPD
self.get_hpd(set_verbose=set_verbose)
def get_hpd(self, set_verbose=True):
""" Get the 68% percentile range of each parameter to self.hpd.
Parameters
----------
set_verbose: bool, optional
True if you want verbosity (default: True).
"""
hpd = np.zeros((3, self.ndim))
chain_len = self.flatchain.shape[0]
pct1sig = chain_len*np.array([0.16, 0.50, 0.84])
medlowhig = pct1sig.astype(np.int32)
for i in xrange(self.ndim):
vsort = np.sort(self.flatchain[:,i])
hpd[:,i] = vsort[medlowhig]
if set_verbose :
print("HPD of %s"%self.vars[i])
if i < 2 :
print("low: %8.3f med %8.3f hig %8.3f"%tuple(np.exp(hpd[:,i])))
else :
print("low: %8.3f med %8.3f hig %8.3f"%tuple(hpd[:,i]))
# register hpd to attr
self.hpd = hpd
def show_hist(self, bins=100, lagbinsize=1.0, figout=None, figext=None):
""" Display histograms of the posterior distributions.
Parameters
----------
        bins: integer, optional
            Number of bins for parameters other than 'lag' (default: 100).
        lagbinsize: float, optional
            Bin width for the 'lag' histograms (default: 1.0).
figout: str, optional
            Output figure name (default: None, i.e., using sequential integers).
figext: str, optional
Output figure extension (default: None, i.e., using `show`).
"""
if not hasattr(self, "flatchain"):
print("Warning: need to run do_mcmc or load_chain first")
return(1)
ln10 = np.log(10.0)
fig = plt.figure(figsize=(14, 2.8*2))
for i in xrange(2) :
ax = fig.add_subplot(2,3,i+1)
ax.hist(self.flatchain[:,i]/ln10, bins)
ax.set_xlabel(self.texs[i])
ax.set_ylabel("N")
# line
for i in xrange(2, 5) :
ax = fig.add_subplot(2,3,i+1+1)
if np.mod(i, 3) == 2 :
# lag plots
lagbins = np.arange(int(np.min(self.flatchain[:,i])),
int(np.max(self.flatchain[:,i]))+lagbinsize, lagbinsize)
ax.hist(self.flatchain[:,i], bins=lagbins)
else :
ax.hist(self.flatchain[:,i], bins)
ax.set_xlabel(self.texs[i])
ax.set_ylabel("N")
return(figure_handler(fig=fig, figout=figout, figext=figext))
def break_chain(self, llag_segments):
""" Break the chain.
Parameters
----------
llag_segments: list of lists
list of a single list, which is a two-element array
bracketing the range of lags (usually the single most probable peak).
"""
if (len(llag_segments) != 1) :
print("Error: llag_segments has to be a list of length 1")
return(1)
if not hasattr(self, "flatchain"):
print("Warning: need to run do_mcmc or load_chain first")
return(1)
llag_seq = llag_segments[0]
if llag_seq is None:
print("Warning: no rule to break chains with")
else :
indx = np.argsort(self.flatchain[:, 2])
imin, imax = np.searchsorted(self.flatchain[indx, 2], llag_seq)
indx_cut = indx[imin : imax]
self.flatchain = self.flatchain[indx_cut, :]
def restore_chain(self) :
self.flatchain = np.copy(self.flatchain_whole)
def load_chain(self, fchain, set_verbose=True):
""" Load stored MCMC chain.
Parameters
----------
fchain: string
Name for the chain file.
set_verbose: bool, optional
True if you want verbosity (default: True).
"""
if set_verbose :
print("load MCMC chain from %s"%fchain)
self.flatchain = np.genfromtxt(fchain)
self.flatchain_whole = np.copy(self.flatchain)
self.ndim = self.flatchain.shape[1]
# get HPD
self.get_hpd(set_verbose=set_verbose)
def do_pred(self, p_bst, fpred=None, dense=10, set_overwrite=True) :
""" Calculate the predicted mean and variance of each light curve on a
densely sampled time axis.
Parameters
----------
p_bst: array_like
            Input parameters.
fpred: string, optional
Name of the output file for the predicted light curves, set it to
None if you do not want output (default: None).
dense: int, optional
The factor by which the predicted light curves should be more
densely sampled than the original data (default: 10).
set_overwrite: bool, optional
True if you want to overwrite existing fpred (default: True).
Returns
-------
zydata_pred: LightCurve object
Predicted light curves packaged as a LightCurve object.
"""
qlist = lnpostfn_sbphoto_p(p_bst, self.zydata, set_retq=True, set_verbose=False)[4]
sigma, tau, lag, wid, scale = unpacksbphotopar(p_bst, self.zydata.nlc)
# update qlist
self.zydata.update_qlist(qlist)
# initialize PredictRmap object
P = PredictSPmap(zydata=self.zydata, sigma=sigma, tau=tau, lag=lag, wid=wid, scale=scale)
nwant = dense*self.cont_npt
jwant0 = self.jstart - 0.1*self.rj
jwant1 = self.jend + 0.1*self.rj
jwant = np.linspace(jwant0, jwant1, nwant)
zylclist_pred = []
iwant = np.ones(nwant)
mve, var = P.mve_var(jwant, iwant)
sig = np.sqrt(var)
zylclist_pred.append([jwant, mve, sig])
zydata_pred = LightCurve(zylclist_pred)
if fpred is not None :
zydata_pred.save(fpred, set_overwrite=set_overwrite)
return(zydata_pred)
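# Illustrative usage sketch (not part of the original API): a single-band
# SPmap_Model run, assuming `zydata` is a LightCurve object with exactly one
# band, and that `conthpd` and `scalehpd` are optional priors in the formats
# documented in `lnpostfn_sbphoto_p`. The chain file name is a placeholder.
def _example_spmap_run(zydata, conthpd=None, scalehpd=None,
                       fchain="spmap_chain.dat"):
    spmap = SPmap_Model(zydata)
    spmap.do_mcmc(conthpd=conthpd, scalehpd=scalehpd,
                  lagtobaseline=0.3, laglimit="baseline",
                  widtobaseline=0.2, widlimit="nyquist",
                  nwalkers=100, nburn=100, nchain=100, fchain=fchain)
    p_bst, logp_bst = spmap.do_map(spmap.hpd[1, :], set_verbose=True)
    return spmap.do_pred(p_bst)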
###########################################################
|
gpl-2.0
|
aflaxman/scikit-learn
|
sklearn/semi_supervised/label_propagation.py
|
12
|
18811
|
# coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but running them can be
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset given
label assignments over an initial subset. In one variant, the algorithm does
not allow for any errors in the initial assignment (hard-clamping) while
in another variant, the algorithm allows for some wiggle room for the initial
assignments, allowing them to change by a fraction alpha in each iteration
(soft-clamping).
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> rng = np.random.RandomState(42)
>>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Utkarsh Upadhyay <[email protected]>
# License: BSD
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
from ..exceptions import ConvergenceWarning
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : integer
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
elif callable(self.kernel):
if y is None:
return self.kernel(X, X)
else:
return self.kernel(X, y)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" or an explicit function "
" are supported at this time." % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
        X : array-like, shape = [n_samples, n_features]
            A matrix of shape [n_samples, n_samples] will be created from this.
        y : array_like, shape = [n_samples]
            Target labels, with unlabeled points marked as -1.
            All unlabeled samples will be transductively assigned labels.
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
alpha = self.alpha
if self._variant == 'spreading' and \
(alpha is None or alpha <= 0.0 or alpha >= 1.0):
raise ValueError('alpha=%s is invalid: it must be inside '
'the open interval (0, 1)' % alpha)
y = np.asarray(y)
unlabeled = y == -1
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self._variant == 'propagation':
# LabelPropagation
y_static[unlabeled] = 0
else:
# LabelSpreading
y_static *= 1 - alpha
l_previous = np.zeros((self.X_.shape[0], n_classes))
unlabeled = unlabeled[:, np.newaxis]
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
for self.n_iter_ in range(self.max_iter):
if np.abs(self.label_distributions_ - l_previous).sum() < self.tol:
break
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
if self._variant == 'propagation':
normalizer = np.sum(
self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
self.label_distributions_ = np.where(unlabeled,
self.label_distributions_,
y_static)
else:
# clamp
self.label_distributions_ = np.multiply(
alpha, self.label_distributions_) + y_static
else:
warnings.warn(
'max_iter=%d was reached without convergence.' % self.max_iter,
category=ConvergenceWarning
)
self.n_iter_ += 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
return self
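# Illustrative sketch (not part of scikit-learn's API): one iteration of the
# hard-clamping ('propagation') update performed in `fit` above, written out
# with plain NumPy operations. `graph` is the affinity matrix returned by
# `_build_graph`, `y_static` holds the one-hot rows of the labeled samples
# (zeros for unlabeled ones), and `unlabeled` is a 1-D boolean mask.
def _one_propagation_step(graph, label_distributions, y_static, unlabeled):
    new_dist = safe_sparse_dot(graph, label_distributions)
    # row-normalize so each sample carries a categorical distribution
    normalizer = np.sum(new_dist, axis=1)[:, np.newaxis]
    new_dist = new_dist / normalizer
    # hard clamping: labeled rows are reset to their original distributions
    return np.where(unlabeled[:, np.newaxis], new_dist, y_static)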
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor.
.. deprecated:: 0.19
This parameter will be removed in 0.21.
'alpha' is fixed to zero in 'LabelPropagation'.
max_iter : integer
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> rng = np.random.RandomState(42)
>>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
_variant = 'propagation'
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=None, max_iter=1000, tol=1e-3, n_jobs=1):
super(LabelPropagation, self).__init__(
kernel=kernel, gamma=gamma, n_neighbors=n_neighbors, alpha=alpha,
max_iter=max_iter, tol=tol, n_jobs=n_jobs)
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
def fit(self, X, y):
if self.alpha is not None:
warnings.warn(
"alpha is deprecated since 0.19 and will be removed in 0.21.",
DeprecationWarning
)
self.alpha = None
return super(LabelPropagation, self).fit(X, y)
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
Clamping factor. A value in [0, 1] that specifies the relative amount
that an instance should adopt the information from its neighbors as
opposed to its initial label.
alpha=0 means keeping the initial label information; alpha=1 means
replacing all initial information.
max_iter : integer
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> rng = np.random.RandomState(42)
>>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
_variant = 'spreading'
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = sparse.csgraph.laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
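# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): in the dense case,
# the matrix returned by _build_graph above is, up to scipy's handling of
# self-loops, the symmetrically normalised affinity S = D^-1/2 * W * D^-1/2
# with its diagonal zeroed (Zhou et al., 2004).  The helper below is a
# hypothetical stand-alone version of that computation for a precomputed
# dense affinity matrix W; it reuses the module-level ``np`` import.
def _normalized_affinity_sketch(W):
    W = np.asarray(W, dtype=float)
    degrees = W.sum(axis=1)
    d_inv_sqrt = 1.0 / np.sqrt(np.maximum(degrees, np.finfo(float).tiny))
    S = d_inv_sqrt[:, np.newaxis] * W * d_inv_sqrt[np.newaxis, :]
    np.fill_diagonal(S, 0.0)  # mirror the diagonal zeroing done above
    return S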
|
bsd-3-clause
|
harisbal/pandas
|
pandas/io/excel.py
|
1
|
62157
|
"""
Module for reading data from and writing data to Excel files.
"""
# ---------------------------------------------------------------------
# ExcelFile class
import abc
from datetime import MINYEAR, date, datetime, time, timedelta
from distutils.version import LooseVersion
from io import UnsupportedOperation
import os
from textwrap import fill
import warnings
import numpy as np
import pandas._libs.json as json
import pandas.compat as compat
from pandas.compat import (
OrderedDict, add_metaclass, lrange, map, range, reduce, string_types, u,
zip)
from pandas.errors import EmptyDataError
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import (
is_bool, is_float, is_integer, is_list_like)
from pandas.core import config
from pandas.core.frame import DataFrame
from pandas.io.common import (
_NA_VALUES, _is_url, _stringify_path, _urlopen, _validate_header_arg,
get_filepath_or_buffer)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
_writer_extensions = ["xlsx", "xls", "xlsm"]
_writers = {}
_read_excel_doc = """
Read an Excel table into a pandas DataFrame
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object, pandas ExcelFile, or xlrd workbook.
The string could be a URL. Valid URL schemes include http, ftp, s3,
gcs, and file. For file URLs, a host is expected. For instance, a local
file could be file://localhost/path/to/workbook.xlsx
sheet_name : string, int, mixed list of strings/ints, or None, default 0
Strings are used for sheet names; integers are used for zero-indexed
sheet positions.
Lists of strings/integers are used to request multiple sheets.
Specify None to get all sheets.
str|int -> DataFrame is returned.
list|None -> Dict of DataFrames is returned, with keys representing
sheets.
Available Cases
* Defaults to 0 -> 1st sheet as a DataFrame
* 1 -> 2nd sheet as a DataFrame
* "Sheet1" -> 1st sheet as a DataFrame
* [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames
* None -> All sheets as a dictionary of DataFrames
sheetname : string, int, mixed list of strings/ints, or None, default 0
.. deprecated:: 0.21.0
Use `sheet_name` instead
header : int, list of ints, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None
index_col : int, list of ints, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
parse_cols : int or list, default None
.. deprecated:: 0.21.0
Pass in `usecols` instead.
usecols : int or list, default None
* If None then parse all columns,
* If int then indicates last column to be parsed
* If list of ints then indicates list of column numbers to be parsed
* If string then indicates comma separated list of Excel column letters and
column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
squeeze : boolean, default False
If the parsed data only contains one column then return a Series
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
.. versionadded:: 0.20.0
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True
.. versionadded:: 0.19.0
false_values : list, default None
Values to consider as False
.. versionadded:: 0.19.0
skiprows : list-like
Rows to skip at the beginning (0-indexed)
nrows : int, default None
Number of rows to parse
.. versionadded:: 0.23.0
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN: '""" + fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ") + """'.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skip_footer : int, default 0
.. deprecated:: 0.23.0
Pass in `skipfooter` instead.
skipfooter : int, default 0
Rows at the end to skip (0-indexed)
convert_float : boolean, default True
convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally
Returns
-------
parsed : DataFrame or Dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a Dict of Dataframes is returned.
Examples
--------
An example DataFrame written to a local file
>>> df_out = pd.DataFrame([('string1', 1),
... ('string2', 2),
... ('string3', 3)],
... columns=['Name', 'Value'])
>>> df_out
Name Value
0 string1 1
1 string2 2
2 string3 3
>>> df_out.to_excel('tmp.xlsx')
The file can be read using the file name as string or an open file object:
>>> pd.read_excel('tmp.xlsx')
Name Value
0 string1 1
1 string2 2
2 string3 3
>>> pd.read_excel(open('tmp.xlsx','rb'))
Name Value
0 string1 1
1 string2 2
2 string3 3
Index and header can be specified via the `index_col` and `header` arguments
>>> pd.read_excel('tmp.xlsx', index_col=None, header=None)
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 string3 3
Column types are inferred but can be explicitly specified
>>> pd.read_excel('tmp.xlsx', dtype={'Name':str, 'Value':float})
Name Value
0 string1 1.0
1 string2 2.0
2 string3 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> pd.read_excel('tmp.xlsx',
... na_values=['string1', 'string2'])
Name Value
0 NaN 1
1 NaN 2
2 string3 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> df = pd.DataFrame({'a': ['1', '#2'], 'b': ['2', '3']})
>>> df.to_excel('tmp.xlsx', index=False)
>>> pd.read_excel('tmp.xlsx')
a b
0 1 2
1 #2 3
>>> pd.read_excel('tmp.xlsx', comment='#')
a b
0 1 2
"""
def register_writer(klass):
"""Adds engine to the excel writer registry. You must use this method to
integrate with ``to_excel``. Also adds config options for any new
``supported_extensions`` defined on the writer."""
if not compat.callable(klass):
raise ValueError("Can only register callables as engines")
engine_name = klass.engine
_writers[engine_name] = klass
for ext in klass.supported_extensions:
if ext.startswith('.'):
ext = ext[1:]
if ext not in _writer_extensions:
config.register_option("io.excel.{ext}.writer".format(ext=ext),
engine_name, validator=str)
_writer_extensions.append(ext)
def _get_default_writer(ext):
_default_writers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xls': 'xlwt'}
try:
import xlsxwriter # noqa
_default_writers['xlsx'] = 'xlsxwriter'
except ImportError:
pass
return _default_writers[ext]
def get_writer(engine_name):
try:
return _writers[engine_name]
except KeyError:
raise ValueError("No Excel writer '{engine}'"
.format(engine=engine_name))
@Appender(_read_excel_doc)
@deprecate_kwarg("parse_cols", "usecols")
@deprecate_kwarg("skip_footer", "skipfooter")
def read_excel(io,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
**kwds):
# Can't use _deprecate_kwarg since sheetname=None has a special meaning
if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
warnings.warn("The `sheetname` keyword is deprecated, use "
"`sheet_name` instead", FutureWarning, stacklevel=2)
sheet_name = kwds.pop("sheetname")
if 'sheet' in kwds:
raise TypeError("read_excel() got an unexpected keyword argument "
"`sheet`")
if not isinstance(io, ExcelFile):
io = ExcelFile(io, engine=engine)
return io.parse(
sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
dtype=dtype,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
**kwds)
class ExcelFile(object):
"""
Class for parsing tabular excel sheets into DataFrame objects.
Uses xlrd. See read_excel for more documentation
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object or xlrd workbook
If a string or path object, expected to be a path to xls or xlsx file
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
"""
def __init__(self, io, **kwds):
err_msg = "Install xlrd >= 0.9.0 for Excel support"
try:
import xlrd
except ImportError:
raise ImportError(err_msg)
else:
ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9): # pragma: no cover
raise ImportError(err_msg +
". Current version " + xlrd.__VERSION__)
# could be a str, ExcelFile, Book, etc.
self.io = io
# Always a string
self._io = _stringify_path(io)
engine = kwds.pop('engine', None)
if engine is not None and engine != 'xlrd':
raise ValueError("Unknown engine: {engine}".format(engine=engine))
# If io is a url, want to keep the data as bytes so can't pass
# to get_filepath_or_buffer()
if _is_url(self._io):
io = _urlopen(self._io)
elif not isinstance(self.io, (ExcelFile, xlrd.Book)):
io, _, _, _ = get_filepath_or_buffer(self._io)
if engine == 'xlrd' and isinstance(io, xlrd.Book):
self.book = io
elif not isinstance(io, xlrd.Book) and hasattr(io, "read"):
# N.B. xlrd.Book has a read attribute too
if hasattr(io, 'seek'):
try:
# GH 19779
io.seek(0)
except UnsupportedOperation:
# HTTPResponse does not support seek()
# GH 20434
pass
data = io.read()
self.book = xlrd.open_workbook(file_contents=data)
elif isinstance(self._io, compat.string_types):
self.book = xlrd.open_workbook(self._io)
else:
raise ValueError('Must explicitly set engine if not passing in'
' buffer or path for io.')
def __fspath__(self):
return self._io
def parse(self,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
**kwds):
"""
Parse specified sheet(s) into a DataFrame
Equivalent to read_excel(ExcelFile, ...) See the read_excel
docstring for more info on accepted parameters
"""
# Can't use _deprecate_kwarg since sheetname=None has a special meaning
if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
warnings.warn("The `sheetname` keyword is deprecated, use "
"`sheet_name` instead", FutureWarning, stacklevel=2)
sheet_name = kwds.pop("sheetname")
elif 'sheetname' in kwds:
raise TypeError("Cannot specify both `sheet_name` "
"and `sheetname`. Use just `sheet_name`")
return self._parse_excel(sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
**kwds)
def _should_parse(self, i, usecols):
def _range2cols(areas):
"""
Convert comma separated list of column names and column ranges to a
list of 0-based column indexes.
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
"""
def _excel2num(x):
"Convert Excel column name like 'AB' to 0-based column index"
return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1,
x.upper().strip(), 0) - 1
cols = []
for rng in areas.split(','):
if ':' in rng:
rng = rng.split(':')
cols += lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1)
else:
cols.append(_excel2num(rng))
return cols
if isinstance(usecols, int):
return i <= usecols
elif isinstance(usecols, compat.string_types):
return i in _range2cols(usecols)
else:
return i in usecols
def _parse_excel(self,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
**kwds):
_validate_header_arg(header)
if 'chunksize' in kwds:
raise NotImplementedError("chunksize keyword of read_excel "
"is not implemented")
if parse_dates is True and index_col is None:
warnings.warn("The 'parse_dates=True' keyword of read_excel was "
"provided without an 'index_col' keyword value.")
import xlrd
from xlrd import (xldate, XL_CELL_DATE,
XL_CELL_ERROR, XL_CELL_BOOLEAN,
XL_CELL_NUMBER)
epoch1904 = self.book.datemode
def _parse_cell(cell_contents, cell_typ):
"""converts the contents of the cell into a pandas
appropriate object"""
if cell_typ == XL_CELL_DATE:
if xlrd_0_9_3:
# Use the newer xlrd datetime handling.
try:
cell_contents = \
xldate.xldate_as_datetime(cell_contents,
epoch1904)
except OverflowError:
return cell_contents
# Excel doesn't distinguish between dates and time,
# so we treat dates on the epoch as times only.
# Also, Excel supports 1900 and 1904 epochs.
year = (cell_contents.timetuple())[0:3]
if ((not epoch1904 and year == (1899, 12, 31)) or
(epoch1904 and year == (1904, 1, 1))):
cell_contents = time(cell_contents.hour,
cell_contents.minute,
cell_contents.second,
cell_contents.microsecond)
else:
# Use the xlrd <= 0.9.2 date handling.
try:
dt = xldate.xldate_as_tuple(cell_contents, epoch1904)
except xldate.XLDateTooLarge:
return cell_contents
if dt[0] < MINYEAR:
cell_contents = time(*dt[3:])
else:
cell_contents = datetime(*dt)
elif cell_typ == XL_CELL_ERROR:
cell_contents = np.nan
elif cell_typ == XL_CELL_BOOLEAN:
cell_contents = bool(cell_contents)
elif convert_float and cell_typ == XL_CELL_NUMBER:
# GH5394 - Excel 'numbers' are always floats
# it's a minimal perf hit and less surprising
val = int(cell_contents)
if val == cell_contents:
cell_contents = val
return cell_contents
# xlrd >= 0.9.3 can return datetime objects directly.
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
xlrd_0_9_3 = True
else:
xlrd_0_9_3 = False
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if isinstance(sheet_name, list):
sheets = sheet_name
ret_dict = True
elif sheet_name is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheet_name]
# handle same-type duplicates.
sheets = list(OrderedDict.fromkeys(sheets).keys())
output = OrderedDict()
for asheetname in sheets:
if verbose:
print("Reading sheet {sheet}".format(sheet=asheetname))
if isinstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.book.sheet_by_index(asheetname)
data = []
should_parse = {}
for i in range(sheet.nrows):
row = []
for j, (value, typ) in enumerate(zip(sheet.row_values(i),
sheet.row_types(i))):
if usecols is not None and j not in should_parse:
should_parse[j] = self._should_parse(j, usecols)
if usecols is None or should_parse[j]:
row.append(_parse_cell(value, typ))
data.append(row)
if sheet.nrows == 0:
output[asheetname] = DataFrame()
continue
if is_list_like(header) and len(header) == 1:
header = header[0]
# forward fill and pull out names for MultiIndex column
header_names = None
if header is not None:
if is_list_like(header):
header_names = []
control_row = [True] * len(data[0])
for row in header:
if is_integer(skiprows):
row += skiprows
data[row], control_row = _fill_mi_header(
data[row], control_row)
header_name, data[row] = _pop_header_name(
data[row], index_col)
header_names.append(header_name)
else:
data[header] = _trim_excel_header(data[header])
if is_list_like(index_col):
# forward fill values for MultiIndex index
if not is_list_like(header):
offset = 1 + header
else:
offset = 1 + max(header)
for col in index_col:
last = data[offset][col]
for row in range(offset + 1, len(data)):
if data[row][col] == '' or data[row][col] is None:
data[row][col] = last
else:
last = data[row][col]
has_index_names = is_list_like(header) and len(header) > 1
# GH 12292 : error when read one empty column from excel file
try:
parser = TextParser(data,
header=header,
index_col=index_col,
has_index_names=has_index_names,
squeeze=squeeze,
dtype=dtype,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
**kwds)
output[asheetname] = parser.read(nrows=nrows)
if names is not None:
output[asheetname].columns = names
if not squeeze or isinstance(output[asheetname], DataFrame):
output[asheetname].columns = output[
asheetname].columns.set_names(header_names)
except EmptyDataError:
# No Data, return an empty DataFrame
output[asheetname] = DataFrame()
if ret_dict:
return output
else:
return output[asheetname]
@property
def sheet_names(self):
return self.book.sheet_names()
def close(self):
"""close io if necessary"""
if hasattr(self.io, 'close'):
self.io.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
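# Illustrative usage sketch (not part of the original module): ExcelFile is a
# context manager, so a workbook can be opened once and several sheets parsed
# from it.  The file name and sheet names below are hypothetical.
#
#     with ExcelFile('report.xlsx') as xls:
#         summary = xls.parse('Summary', index_col=0)
#         details = xls.parse('Details', skiprows=1)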
def _validate_freeze_panes(freeze_panes):
if freeze_panes is not None:
if (
len(freeze_panes) == 2 and
all(isinstance(item, int) for item in freeze_panes)
):
return True
raise ValueError("freeze_panes must be of form (row, column)"
" where row and column are integers")
# freeze_panes wasn't specified, return False so it won't be applied
# to output sheet
return False
def _trim_excel_header(row):
# trim header row so auto-index inference works
# xlrd uses '' , openpyxl None
while len(row) > 0 and (row[0] == '' or row[0] is None):
row = row[1:]
return row
def _fill_mi_header(row, control_row):
"""Forward fills blank entries in row, but only inside the same parent index
Used for creating headers in Multiindex.
Parameters
----------
row : list
List of items in a single row.
control_row : list of boolean
Helps to determine if particular column is in same parent index as the
previous value. Used to stop propagation of empty cells between
different indexes.
Returns
-------
Returns changed row and control_row.
"""
last = row[0]
for i in range(1, len(row)):
if not control_row[i]:
last = row[i]
if row[i] == '' or row[i] is None:
row[i] = last
else:
control_row[i] = False
last = row[i]
return row, control_row
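# Illustrative example (hypothetical values): given a header row from a
# two-level MultiIndex, blanks are forward-filled only while the control row
# still marks the column as belonging to the same parent label:
#
#     row, ctrl = _fill_mi_header(['a', '', 'b', ''], [True, True, True, True])
#     # row  -> ['a', 'a', 'b', 'b']
#     # ctrl -> [True, True, False, True]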
# fill blank if index_col not None
def _pop_header_name(row, index_col):
""" (header, new_data) for header rows in MultiIndex parsing"""
none_fill = lambda x: None if x == '' else x
if index_col is None:
# no index col specified, trim data for inference path
return none_fill(row[0]), row[1:]
else:
# pop out header name and fill w/ blank
i = index_col if not is_list_like(index_col) else max(index_col)
return none_fill(row[i]), row[:i] + [''] + row[i + 1:]
@add_metaclass(abc.ABCMeta)
class ExcelWriter(object):
"""
Class for writing DataFrame objects into Excel sheets. By default xlwt is
used for xls files and openpyxl for xlsx files. See DataFrame.to_excel for
typical usage.
Parameters
----------
path : string
Path to xls or xlsx file.
engine : string (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
date_format : string, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD')
datetime_format : string, default None
Format string for datetime objects written into Excel files
(e.g. 'YYYY-MM-DD HH:MM:SS')
mode : {'w' or 'a'}, default 'w'
File mode to use (write or append).
.. versionadded:: 0.24.0
Notes
-----
None of the methods and properties are considered public.
For compatibility with CSV writers, ExcelWriter serializes lists
and dicts to strings before writing.
Examples
--------
Default usage:
>>> with ExcelWriter('path_to_file.xlsx') as writer:
... df.to_excel(writer)
To write to separate sheets in a single file:
>>> with ExcelWriter('path_to_file.xlsx') as writer:
... df1.to_excel(writer, sheet_name='Sheet1')
... df2.to_excel(writer, sheet_name='Sheet2')
You can set the date format or datetime format:
>>> with ExcelWriter('path_to_file.xlsx',
date_format='YYYY-MM-DD',
datetime_format='YYYY-MM-DD HH:MM:SS') as writer:
... df.to_excel(writer)
You can also append to an existing Excel file:
>>> with ExcelWriter('path_to_file.xlsx', mode='a') as writer:
... df.to_excel(writer, sheet_name='Sheet3')
Attributes
----------
None
Methods
-------
None
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
# - Mandatory
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
# --> called to write additional DataFrames to disk
# - ``supported_extensions`` (tuple of supported extensions), used to
# check that engine supports the given extension.
# - ``engine`` - string that gives the engine name. Necessary to
# instantiate class directly and bypass ``ExcelWriterMeta`` engine
# lookup.
# - ``save(self)`` --> called to save file to disk
# - Mostly mandatory (i.e. should at least exist)
# - book, cur_sheet, path
# - Optional:
# - ``__init__(self, path, engine=None, **kwargs)`` --> always called
# with path as first argument.
# You also need to register the class with ``register_writer()``.
# Technically, ExcelWriter implementations don't need to subclass
# ExcelWriter.
def __new__(cls, path, engine=None, **kwargs):
# only switch class if generic(ExcelWriter)
if issubclass(cls, ExcelWriter):
if engine is None or (isinstance(engine, string_types) and
engine == 'auto'):
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1][1:]
else:
ext = 'xlsx'
try:
engine = config.get_option('io.excel.{ext}.writer'
.format(ext=ext))
if engine == 'auto':
engine = _get_default_writer(ext)
except KeyError:
error = ValueError("No engine for filetype: '{ext}'"
.format(ext=ext))
raise error
cls = get_writer(engine)
return object.__new__(cls)
# declare external properties you can count on
book = None
curr_sheet = None
path = None
@abc.abstractproperty
def supported_extensions(self):
"extensions that writer engine supports"
pass
@abc.abstractproperty
def engine(self):
"name of engine"
pass
@abc.abstractmethod
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
"""
Write given formatted cells into an Excel sheet
Parameters
----------
cells : generator
cell of formatted data to save to Excel sheet
sheet_name : string, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow: upper left cell row to dump data frame
startcol: upper left cell column to dump data frame
freeze_panes: integer tuple of length 2
contains the bottom-most row and right-most column to freeze
"""
pass
@abc.abstractmethod
def save(self):
"""
Save workbook to disk.
"""
pass
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, mode='w',
**engine_kwargs):
# validate that this engine can handle the extension
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1]
else:
ext = 'xls' if engine == 'xlwt' else 'xlsx'
self.check_extension(ext)
self.path = path
self.sheets = {}
self.cur_sheet = None
if date_format is None:
self.date_format = 'YYYY-MM-DD'
else:
self.date_format = date_format
if datetime_format is None:
self.datetime_format = 'YYYY-MM-DD HH:MM:SS'
else:
self.datetime_format = datetime_format
self.mode = mode
def __fspath__(self):
return _stringify_path(self.path)
def _get_sheet_name(self, sheet_name):
if sheet_name is None:
sheet_name = self.cur_sheet
if sheet_name is None: # pragma: no cover
raise ValueError('Must pass explicit sheet_name or set '
'cur_sheet property')
return sheet_name
def _value_with_fmt(self, val):
"""Convert numpy types to Python types for the Excel writers.
Parameters
----------
val : object
Value to be written into cells
Returns
-------
Tuple with the first element being the converted value and the second
being an optional format
"""
fmt = None
if is_integer(val):
val = int(val)
elif is_float(val):
val = float(val)
elif is_bool(val):
val = bool(val)
elif isinstance(val, datetime):
fmt = self.datetime_format
elif isinstance(val, date):
fmt = self.date_format
elif isinstance(val, timedelta):
val = val.total_seconds() / float(86400)
fmt = '0'
else:
val = compat.to_str(val)
return val, fmt
@classmethod
def check_extension(cls, ext):
"""checks that path's extension against the Writer's supported
extensions. If it isn't supported, raises UnsupportedFiletypeError."""
if ext.startswith('.'):
ext = ext[1:]
if not any(ext in extension for extension in cls.supported_extensions):
msg = (u("Invalid extension for engine '{engine}': '{ext}'")
.format(engine=pprint_thing(cls.engine),
ext=pprint_thing(ext)))
raise ValueError(msg)
else:
return True
# Allow use as a contextmanager
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""synonym for save, to make it more file-like"""
return self.save()
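# ---------------------------------------------------------------------------
# Illustrative sketch (not part of pandas): the minimal surface a new engine
# must provide before it can be registered via register_writer(), following
# the contract described in the comments above.  The class below is a
# hypothetical no-op writer; real engines create and persist a workbook.
class _NullWriterSketch(ExcelWriter):
    engine = 'null-sketch'              # hypothetical engine name
    supported_extensions = ('.xlsx',)

    def __init__(self, path, engine=None, mode='w', **engine_kwargs):
        super(_NullWriterSketch, self).__init__(path, mode=mode,
                                                **engine_kwargs)
        self.book = None                # real writers build a workbook here

    def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
                    freeze_panes=None):
        sheet_name = self._get_sheet_name(sheet_name)
        for cell in cells:              # real writers persist each cell here
            pass

    def save(self):
        pass                            # real writers flush the workbook

# register_writer(_NullWriterSketch)   # uncommenting makes the engine
#                                      # selectable as engine='null-sketch'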
class _OpenpyxlWriter(ExcelWriter):
engine = 'openpyxl'
supported_extensions = ('.xlsx', '.xlsm')
def __init__(self, path, engine=None, mode='w', **engine_kwargs):
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
super(_OpenpyxlWriter, self).__init__(path, mode=mode, **engine_kwargs)
if self.mode == 'a': # Load from existing workbook
from openpyxl import load_workbook
book = load_workbook(self.path)
self.book = book
else:
# Create workbook object with default optimized_write=True.
self.book = Workbook()
if self.book.worksheets:
try:
self.book.remove(self.book.worksheets[0])
except AttributeError:
# compat - for openpyxl <= 2.4
self.book.remove_sheet(self.book.worksheets[0])
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
@classmethod
def _convert_to_style(cls, style_dict):
"""
converts a style_dict to an openpyxl style object
Parameters
----------
style_dict: style dictionary to convert
"""
from openpyxl.style import Style
xls_style = Style()
for key, value in style_dict.items():
for nk, nv in value.items():
if key == "borders":
(xls_style.borders.__getattribute__(nk)
.__setattr__('border_style', nv))
else:
xls_style.__getattribute__(key).__setattr__(nk, nv)
return xls_style
@classmethod
def _convert_to_style_kwargs(cls, style_dict):
"""
Convert a style_dict to a set of kwargs suitable for initializing
or updating-on-copy an openpyxl v2 style object
Parameters
----------
style_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'font'
'fill'
'border' ('borders')
'alignment'
'number_format'
'protection'
Returns
-------
style_kwargs : dict
A dict with the same, normalized keys as ``style_dict`` but each
value has been replaced with a native openpyxl style object of the
appropriate class.
"""
_style_key_map = {
'borders': 'border',
}
style_kwargs = {}
for k, v in style_dict.items():
if k in _style_key_map:
k = _style_key_map[k]
_conv_to_x = getattr(cls, '_convert_to_{k}'.format(k=k),
lambda x: None)
new_v = _conv_to_x(v)
if new_v:
style_kwargs[k] = new_v
return style_kwargs
@classmethod
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec)
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_map = {
'sz': 'size',
'b': 'bold',
'i': 'italic',
'u': 'underline',
'strike': 'strikethrough',
'vertalign': 'vertAlign',
}
font_kwargs = {}
for k, v in font_dict.items():
if k in _font_key_map:
k = _font_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
@classmethod
def _convert_to_stop(cls, stop_seq):
"""
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
suitable for initializing the ``GradientFill`` ``stop`` parameter.
Parameters
----------
stop_seq : iterable
An iterable that yields objects suitable for consumption by
``_convert_to_color``.
Returns
-------
stop : list of openpyxl.styles.Color
"""
return map(cls._convert_to_color, stop_seq)
@classmethod
def _convert_to_fill(cls, fill_dict):
"""
Convert ``fill_dict`` to an openpyxl v2 Fill object
Parameters
----------
fill_dict : dict
A dict with one or more of the following keys (or their synonyms),
'fill_type' ('patternType', 'patterntype')
'start_color' ('fgColor', 'fgcolor')
'end_color' ('bgColor', 'bgcolor')
or one or more of the following keys (or their synonyms).
'type' ('fill_type')
'degree'
'left'
'right'
'top'
'bottom'
'stop'
Returns
-------
fill : openpyxl.styles.Fill
"""
from openpyxl.styles import PatternFill, GradientFill
_pattern_fill_key_map = {
'patternType': 'fill_type',
'patterntype': 'fill_type',
'fgColor': 'start_color',
'fgcolor': 'start_color',
'bgColor': 'end_color',
'bgcolor': 'end_color',
}
_gradient_fill_key_map = {
'fill_type': 'type',
}
pfill_kwargs = {}
gfill_kwargs = {}
for k, v in fill_dict.items():
pk = gk = None
if k in _pattern_fill_key_map:
pk = _pattern_fill_key_map[k]
if k in _gradient_fill_key_map:
gk = _gradient_fill_key_map[k]
if pk in ['start_color', 'end_color']:
v = cls._convert_to_color(v)
if gk == 'stop':
v = cls._convert_to_stop(v)
if pk:
pfill_kwargs[pk] = v
elif gk:
gfill_kwargs[gk] = v
else:
pfill_kwargs[k] = v
gfill_kwargs[k] = v
try:
return PatternFill(**pfill_kwargs)
except TypeError:
return GradientFill(**gfill_kwargs)
@classmethod
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {
'border_style': 'style',
}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
if k in _side_key_map:
k = _side_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
@classmethod
def _convert_to_border(cls, border_dict):
"""
Convert ``border_dict`` to an openpyxl v2 Border object
Parameters
----------
border_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'left'
'right'
'top'
'bottom'
'diagonal'
'diagonal_direction'
'vertical'
'horizontal'
'diagonalUp' ('diagonalup')
'diagonalDown' ('diagonaldown')
'outline'
Returns
-------
border : openpyxl.styles.Border
"""
from openpyxl.styles import Border
_border_key_map = {
'diagonalup': 'diagonalUp',
'diagonaldown': 'diagonalDown',
}
border_kwargs = {}
for k, v in border_dict.items():
if k in _border_key_map:
k = _border_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
if k in ['left', 'right', 'top', 'bottom', 'diagonal']:
v = cls._convert_to_side(v)
border_kwargs[k] = v
return Border(**border_kwargs)
@classmethod
def _convert_to_alignment(cls, alignment_dict):
"""
Convert ``alignment_dict`` to an openpyxl v2 Alignment object
Parameters
----------
alignment_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'horizontal'
'vertical'
'text_rotation'
'wrap_text'
'shrink_to_fit'
'indent'
Returns
-------
alignment : openpyxl.styles.Alignment
"""
from openpyxl.styles import Alignment
return Alignment(**alignment_dict)
@classmethod
def _convert_to_number_format(cls, number_format_dict):
"""
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
initializer.
Parameters
----------
number_format_dict : dict
A dict with zero or more of the following keys.
'format_code' : str
Returns
-------
number_format : str
"""
return number_format_dict['format_code']
@classmethod
def _convert_to_protection(cls, protection_dict):
"""
Convert ``protection_dict`` to an openpyxl v2 Protection object.
Parameters
----------
protection_dict : dict
A dict with zero or more of the following keys.
'locked'
'hidden'
Returns
-------
"""
from openpyxl.styles import Protection
return Protection(**protection_dict)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
# Write the frame cells using openpyxl.
sheet_name = self._get_sheet_name(sheet_name)
_style_cache = {}
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
if _validate_freeze_panes(freeze_panes):
wks.freeze_panes = wks.cell(row=freeze_panes[0] + 1,
column=freeze_panes[1] + 1)
for cell in cells:
xcell = wks.cell(
row=startrow + cell.row + 1,
column=startcol + cell.col + 1
)
xcell.value, fmt = self._value_with_fmt(cell.val)
if fmt:
xcell.number_format = fmt
style_kwargs = {}
if cell.style:
key = str(cell.style)
style_kwargs = _style_cache.get(key)
if style_kwargs is None:
style_kwargs = self._convert_to_style_kwargs(cell.style)
_style_cache[key] = style_kwargs
if style_kwargs:
for k, v in style_kwargs.items():
setattr(xcell, k, v)
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_cells(
start_row=startrow + cell.row + 1,
start_column=startcol + cell.col + 1,
end_column=startcol + cell.mergeend + 1,
end_row=startrow + cell.mergestart + 1
)
# When cells are merged only the top-left cell is preserved
# The behaviour of the other cells in a merged range is
# undefined
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
xcell = wks.cell(column=col, row=row)
for k, v in style_kwargs.items():
setattr(xcell, k, v)
register_writer(_OpenpyxlWriter)
class _XlwtWriter(ExcelWriter):
engine = 'xlwt'
supported_extensions = ('.xls',)
def __init__(self, path, engine=None, encoding=None, mode='w',
**engine_kwargs):
# Use the xlwt module as the Excel writer.
import xlwt
engine_kwargs['engine'] = engine
if mode == 'a':
raise ValueError('Append mode is not supported with xlwt!')
super(_XlwtWriter, self).__init__(path, mode=mode, **engine_kwargs)
if encoding is None:
encoding = 'ascii'
self.book = xlwt.Workbook(encoding=encoding)
self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format)
self.fm_date = xlwt.easyxf(num_format_str=self.date_format)
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
# Write the frame cells using xlwt.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_sheet(sheet_name)
self.sheets[sheet_name] = wks
if _validate_freeze_panes(freeze_panes):
wks.set_panes_frozen(True)
wks.set_horz_split_pos(freeze_panes[0])
wks.set_vert_split_pos(freeze_panes[1])
style_dict = {}
for cell in cells:
val, fmt = self._value_with_fmt(cell.val)
stylekey = json.dumps(cell.style)
if fmt:
stylekey += fmt
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self._convert_to_style(cell.style, fmt)
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.write_merge(startrow + cell.row,
startrow + cell.mergestart,
startcol + cell.col,
startcol + cell.mergeend,
val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
val, style)
@classmethod
def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',',
line_sep=';'):
"""helper which recursively generate an xlwt easy style string
for example:
hstyle = {"font": {"bold": True},
"border": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"align": {"horiz": "center"}}
will be converted to
font: bold on; \
border: top thin, right thin, bottom thin, left thin; \
align: horiz center;
"""
if hasattr(item, 'items'):
if firstlevel:
it = ["{key}: {val}"
.format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "{sep} ".format(sep=(line_sep).join(it))
return out
else:
it = ["{key} {val}"
.format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "{sep} ".format(sep=(field_sep).join(it))
return out
else:
item = "{item}".format(item=item)
item = item.replace("True", "on")
item = item.replace("False", "off")
return item
@classmethod
def _convert_to_style(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlwt style object
Parameters
----------
style_dict: style dictionary to convert
num_format_str: optional number format string
"""
import xlwt
if style_dict:
xlwt_stylestr = cls._style_to_xlwt(style_dict)
style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';')
else:
style = xlwt.XFStyle()
if num_format_str is not None:
style.num_format_str = num_format_str
return style
register_writer(_XlwtWriter)
class _XlsxStyler(object):
# Map from openpyxl-oriented styles to flatter xlsxwriter representation
# Ordering necessary for both determinism and because some are keyed by
# prefixes of others.
STYLE_MAPPING = {
'font': [
(('name',), 'font_name'),
(('sz',), 'font_size'),
(('size',), 'font_size'),
(('color', 'rgb',), 'font_color'),
(('color',), 'font_color'),
(('b',), 'bold'),
(('bold',), 'bold'),
(('i',), 'italic'),
(('italic',), 'italic'),
(('u',), 'underline'),
(('underline',), 'underline'),
(('strike',), 'font_strikeout'),
(('vertAlign',), 'font_script'),
(('vertalign',), 'font_script'),
],
'number_format': [
(('format_code',), 'num_format'),
((), 'num_format',),
],
'protection': [
(('locked',), 'locked'),
(('hidden',), 'hidden'),
],
'alignment': [
(('horizontal',), 'align'),
(('vertical',), 'valign'),
(('text_rotation',), 'rotation'),
(('wrap_text',), 'text_wrap'),
(('indent',), 'indent'),
(('shrink_to_fit',), 'shrink'),
],
'fill': [
(('patternType',), 'pattern'),
(('patterntype',), 'pattern'),
(('fill_type',), 'pattern'),
(('start_color', 'rgb',), 'fg_color'),
(('fgColor', 'rgb',), 'fg_color'),
(('fgcolor', 'rgb',), 'fg_color'),
(('start_color',), 'fg_color'),
(('fgColor',), 'fg_color'),
(('fgcolor',), 'fg_color'),
(('end_color', 'rgb',), 'bg_color'),
(('bgColor', 'rgb',), 'bg_color'),
(('bgcolor', 'rgb',), 'bg_color'),
(('end_color',), 'bg_color'),
(('bgColor',), 'bg_color'),
(('bgcolor',), 'bg_color'),
],
'border': [
(('color', 'rgb',), 'border_color'),
(('color',), 'border_color'),
(('style',), 'border'),
(('top', 'color', 'rgb',), 'top_color'),
(('top', 'color',), 'top_color'),
(('top', 'style',), 'top'),
(('top',), 'top'),
(('right', 'color', 'rgb',), 'right_color'),
(('right', 'color',), 'right_color'),
(('right', 'style',), 'right'),
(('right',), 'right'),
(('bottom', 'color', 'rgb',), 'bottom_color'),
(('bottom', 'color',), 'bottom_color'),
(('bottom', 'style',), 'bottom'),
(('bottom',), 'bottom'),
(('left', 'color', 'rgb',), 'left_color'),
(('left', 'color',), 'left_color'),
(('left', 'style',), 'left'),
(('left',), 'left'),
],
}
@classmethod
def convert(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlsxwriter format dict
Parameters
----------
style_dict: style dictionary to convert
num_format_str: optional number format string
"""
# Create a XlsxWriter format object.
props = {}
if num_format_str is not None:
props['num_format'] = num_format_str
if style_dict is None:
return props
if 'borders' in style_dict:
style_dict = style_dict.copy()
style_dict['border'] = style_dict.pop('borders')
for style_group_key, style_group in style_dict.items():
for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
# src is a sequence of keys into a nested dict
# dst is a flat key
if dst in props:
continue
v = style_group
for k in src:
try:
v = v[k]
except (KeyError, TypeError):
break
else:
props[dst] = v
if isinstance(props.get('pattern'), string_types):
# TODO: support other fill patterns
props['pattern'] = 0 if props['pattern'] == 'none' else 1
for k in ['border', 'top', 'right', 'bottom', 'left']:
if isinstance(props.get(k), string_types):
try:
props[k] = ['none', 'thin', 'medium', 'dashed', 'dotted',
'thick', 'double', 'hair', 'mediumDashed',
'dashDot', 'mediumDashDot', 'dashDotDot',
'mediumDashDotDot',
'slantDashDot'].index(props[k])
except ValueError:
props[k] = 2
if isinstance(props.get('font_script'), string_types):
props['font_script'] = ['baseline', 'superscript',
'subscript'].index(props['font_script'])
if isinstance(props.get('underline'), string_types):
props['underline'] = {'none': 0, 'single': 1, 'double': 2,
'singleAccounting': 33,
'doubleAccounting': 34}[props['underline']]
return props
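# Illustrative example (hypothetical input): convert() flattens the
# openpyxl-style nested dict into the flat keyword dict xlsxwriter expects,
# e.g.
#
#     _XlsxStyler.convert({'font': {'bold': True},
#                          'borders': {'top': {'style': 'thin'}}})
#     # -> {'bold': True, 'top': 1}   ('thin' maps to border index 1)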
class _XlsxWriter(ExcelWriter):
engine = 'xlsxwriter'
supported_extensions = ('.xlsx',)
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, mode='w',
**engine_kwargs):
# Use the xlsxwriter module as the Excel writer.
import xlsxwriter
if mode == 'a':
raise ValueError('Append mode is not supported with xlsxwriter!')
super(_XlsxWriter, self).__init__(path, engine=engine,
date_format=date_format,
datetime_format=datetime_format,
mode=mode,
**engine_kwargs)
self.book = xlsxwriter.Workbook(path, **engine_kwargs)
def save(self):
"""
Save workbook to disk.
"""
return self.book.close()
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
# Write the frame cells using xlsxwriter.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_worksheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {'null': None}
if _validate_freeze_panes(freeze_panes):
wks.freeze_panes(*(freeze_panes))
for cell in cells:
val, fmt = self._value_with_fmt(cell.val)
stylekey = json.dumps(cell.style)
if fmt:
stylekey += fmt
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self.book.add_format(
_XlsxStyler.convert(cell.style, fmt))
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_range(startrow + cell.row,
startcol + cell.col,
startrow + cell.mergestart,
startcol + cell.mergeend,
cell.val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
val, style)
register_writer(_XlsxWriter)
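# Illustrative end-to-end sketch (not part of the module): writing two frames
# into one workbook while freezing the header row of the first sheet, which
# exercises _validate_freeze_panes and the write_cells implementations above.
# The file and column names are hypothetical.
#
#     import pandas as pd
#
#     df1 = pd.DataFrame({'a': [1, 2]})
#     df2 = pd.DataFrame({'b': [3, 4]})
#     with pd.ExcelWriter('report.xlsx') as writer:
#         df1.to_excel(writer, sheet_name='Sheet1', freeze_panes=(1, 0))
#         df2.to_excel(writer, sheet_name='Sheet2')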
|
bsd-3-clause
|
mojoboss/scikit-learn
|
examples/manifold/plot_compare_methods.py
|
259
|
4031
|
"""
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (n_points, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
WangWenjun559/Weiss
|
summary/sumy/sklearn/decomposition/tests/test_fastica.py
|
272
|
7798
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
apache-2.0
|
desihub/desisim
|
py/desisim/qso_template/desi_qso_templ.py
|
1
|
19250
|
"""
desisim.qso_template.desi_qso_templ
===================================
Module for Fitting PCA to the BOSS QSOs
01-Dec-2014 by JXP
"""
from __future__ import print_function, absolute_import, division
import numpy as np
import os
import imp
import pdb
from scipy.interpolate import interp1d
from astropy.io import fits
try:
from scipy import constants
C_LIGHT = constants.c/1000.0
except TypeError: # This can happen during documentation builds.
C_LIGHT = 299792458.0/1000.0
from desisim.qso_template import fit_boss_qsos as fbq
from desiutil.stats import perc
import desisim.io
from desispec.interpolation import resample_flux
from desiutil.log import get_logger
log = get_logger()
#from xastropy.stats.basic import perc
flg_xdb = True
try:
from xastropy.xutils import xdebug as xdb
except ImportError:
flg_xdb = False
def mean_templ_zi(zimag, debug=False, i_wind=0.1, z_wind=0.05,
boss_pca_fil=None):
'''
Generate 'mean' templates at given z,i
Parameters
----------
zimag: list of tuples
Redshift, imag pairs for the templates
i_wind: float (0.1 mag)
Window for smoothing imag
    z_wind: float (0.05)
Window for smoothing redshift
'''
# PCA values
if boss_pca_fil is None:
boss_pca_fil = 'BOSS_DR10Lya_PCA_values_nocut.fits.gz'
hdu = fits.open(boss_pca_fil)
pca_coeff = hdu[1].data
# BOSS Eigenvectors
eigen, eigen_wave = fbq.read_qso_eigen()
npix = len(eigen_wave)
# Open the BOSS catalog file
boss_cat_fil = os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'
bcat_hdu = fits.open(boss_cat_fil)
t_boss = bcat_hdu[1].data
zQSO = t_boss['z_pipe']
tmp = t_boss['PSFMAG']
imag = tmp[:,3] # i-band mag
# Output array
ntempl = len(zimag)
out_spec = np.zeros( (ntempl, npix) )
# Iterate on z,imag
for izi in zimag:
tt = zimag.index(izi)
# Find matches
idx = np.where( (np.fabs(imag-izi[1]) < i_wind) &
(np.fabs(zQSO-izi[0]) < z_wind))[0]
if len(idx) < 50:
raise ValueError('mean_templ_zi: Not enough QSOs! {:d}'.format(len(idx)))
# Calculate median PCA values
PCA0 = np.median(pca_coeff['PCA0'][idx])
PCA1 = np.median(pca_coeff['PCA1'][idx])
PCA2 = np.median(pca_coeff['PCA2'][idx])
PCA3 = np.median(pca_coeff['PCA3'][idx])
acoeff = np.array( [PCA0, PCA1, PCA2, PCA3] )
# Make the template
out_spec[tt,:] = np.dot(eigen.T,acoeff)
if debug is True:
xdb.xplot(eigen_wave*(1.+izi[0]), out_spec[tt,:])
xdb.set_trace()
# Return
return out_spec
# ##################### #####################
# ##################### #####################
# Plots DESI templates at a range of z and imag
def fig_desi_templ_z_i(outfil=None, boss_fil=None, flg=0):
'''
flg = 0: Redshift
flg = 1: imag
'''
# Todo
# Include NHI on the label
# Imports
import matplotlib as mpl
mpl.rcParams['font.family'] = 'stixgeneral'
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
# Eigen (for wavelengths)
eigen, eigen_wave = fbq.read_qso_eigen()
all_zi = [ [ (2.3, 18.5), (2.3, 19.5), (2.3, 20.5), (2.3, 21.5) ],
[ (2.5, 18.5), (2.5, 19.5), (2.5, 20.5), (2.5, 21.5) ],
[ (2.7, 19.5), (2.7, 20.5), (2.7, 21.5) ],
[ (3.2, 19.5), (3.2, 20.5), (3.2, 21.5) ] ]
xmnx = (3600., 9000.)
# Start the plot
if outfil != None:
pp = PdfPages(outfil)
plt.figure(figsize=(8, 5))
plt.clf()
gs = gridspec.GridSpec(4, 1)
# Looping
for ii in range(4):
# Get the templates
ztempl = all_zi[ii][0][0]
spec = mean_templ_zi(all_zi[ii])
# Axis
ax = plt.subplot(gs[ii])
#ax = plt.subplot(gs[ii//2,ii%2])
# Labels
if ii == 3:
ax.set_xlabel('Wavelength')
else:
ax.get_xaxis().set_ticks([])
ax.set_ylabel('Flux')
ax.set_xlim(xmnx)
# Data
# Values
for jj in range(len(all_zi[ii])):
ax.plot( eigen_wave*(1.+ ztempl),
spec[jj,:], '-',drawstyle='steps-mid', linewidth=0.5)
if jj == 0:
ymx = 1.05*np.max(spec[jj,:])
ax.set_ylim((0., ymx))
# Label
zlbl = 'z={:g}'.format(ztempl)
ax.text(7000., ymx*0.7, zlbl)
# Layout and save
plt.tight_layout(pad=0.2,h_pad=0.0,w_pad=0.25)
if outfil != None:
pp.savefig(bbox_inches='tight')
pp.close()
else:
plt.show()
def desi_qso_templates(z_wind=0.2, zmnx=(0.4,4.), outfil=None, N_perz=500,
boss_pca_fil=None, wvmnx=(3500., 10000.),
rebin_wave=None, rstate=None,
sdss_pca_fil=None, no_write=False, redshift=None,
seed=None, old_read=False, ipad=40, cosmo=None):
""" Generate QSO templates for DESI
Rebins to input wavelength array (or log10 in wvmnx)
Parameters
----------
z_wind : float, optional
Window for sampling PCAs
zmnx : tuple, optional
Min/max for generation
N_perz : int, optional
Number of draws per redshift window
old_read : bool, optional
Read the files the old way
seed : int, optional
Seed for the random number state
rebin_wave : ndarray, optional
Input wavelengths for rebinning
wvmnx : tuple, optional
Wavelength limits for rebinning (not used with rebin_wave)
redshift : ndarray, optional
Redshifts desired for the templates
ipad : int, optional
Padding for enabling enough models
cosmo: astropy.cosmology.core, optional
      Cosmology instantiation from astropy.cosmology.core
Returns
-------
wave : ndarray
Wavelengths that the spectra were rebinned to
flux : ndarray (2D; flux vs. model)
z : ndarray
Redshifts
"""
# Cosmology
if cosmo is None:
from astropy import cosmology
cosmo = cosmology.core.FlatLambdaCDM(70., 0.3)
if old_read:
# PCA values
if boss_pca_fil is None:
boss_pca_fil = 'BOSS_DR10Lya_PCA_values_nocut.fits.gz'
hdu = fits.open(boss_pca_fil)
boss_pca_coeff = hdu[1].data
if sdss_pca_fil is None:
sdss_pca_fil = 'SDSS_DR7Lya_PCA_values_nocut.fits.gz'
hdu2 = fits.open(sdss_pca_fil)
sdss_pca_coeff = hdu2[1].data
# Open the BOSS catalog file
boss_cat_fil = os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'
bcat_hdu = fits.open(boss_cat_fil)
t_boss = bcat_hdu[1].data
boss_zQSO = t_boss['z_pipe']
# Open the SDSS catalog file
sdss_cat_fil = os.environ.get('SDSSPATH')+'/DR7_QSO/dr7_qso.fits.gz'
scat_hdu = fits.open(sdss_cat_fil)
t_sdss = scat_hdu[1].data
sdss_zQSO = t_sdss['z']
if len(sdss_pca_coeff) != len(sdss_zQSO):
print('Need to finish running the SDSS models!')
sdss_zQSO = sdss_zQSO[0:len(sdss_pca_coeff)]
# Eigenvectors
eigen, eigen_wave = fbq.read_qso_eigen()
else:
infile = desisim.io.find_basis_template('qso')
with fits.open(infile) as hdus:
hdu_names = [hdus[ii].name for ii in range(len(hdus))]
boss_pca_coeff = hdus[hdu_names.index('BOSS_PCA')].data
sdss_pca_coeff = hdus[hdu_names.index('SDSS_PCA')].data
boss_zQSO = hdus[hdu_names.index('BOSS_Z')].data
sdss_zQSO = hdus[hdu_names.index('SDSS_Z')].data
eigen = hdus[hdu_names.index('SDSS_EIGEN')].data
eigen_wave = hdus[hdu_names.index('SDSS_EIGEN_WAVE')].data
# Fiddle with the eigen-vectors
npix = len(eigen_wave)
chkpix = np.where((eigen_wave > 900.) & (eigen_wave < 5000.) )[0]
lambda_912 = 911.76
pix912 = np.argmin( np.abs(eigen_wave-lambda_912) )
    # Loop on redshift. If the user supplied redshifts, use them directly;
    # otherwise step through redshift windows of width z_wind.
if redshift is None:
z0 = np.arange(zmnx[0],zmnx[1],z_wind)
z1 = z0 + z_wind
else:
if np.isscalar(redshift):
z0 = np.array([redshift])
else:
z0 = redshift.copy()
z1 = z0.copy() #+ z_wind
pca_list = ['PCA0', 'PCA1', 'PCA2', 'PCA3']
PCA_mean = np.zeros(4)
PCA_sig = np.zeros(4)
PCA_rand = np.zeros((4,N_perz*ipad))
final_spec = np.zeros((npix, N_perz * len(z0)))
final_wave = np.zeros((npix, N_perz * len(z0)))
final_z = np.zeros(N_perz * len(z0))
# Random state
if rstate is None:
rstate = np.random.RandomState(seed)
for ii in range(len(z0)):
# BOSS or SDSS?
if z0[ii] > 2.15:
zQSO = boss_zQSO
pca_coeff = boss_pca_coeff
else:
zQSO = sdss_zQSO
pca_coeff = sdss_pca_coeff
# Random z values and wavelengths
zrand = rstate.uniform( z0[ii], z1[ii], N_perz*ipad)
wave = np.outer(eigen_wave, 1+zrand)
# MFP (Worseck+14)
mfp = 37. * ( (1+zrand)/5. )**(-5.4) # Physical Mpc
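        # Rough scale check (assuming the Worseck+14 power law above): at z ~ 3,
        # ((1+z)/5)**(-5.4) ~ 3.3, so mfp ~ 120 proper Mpc.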
# Grab PCA mean + sigma
if redshift is None:
idx = np.where( (zQSO >= z0[ii]) & (zQSO < z1[ii]) )[0]
else:
# Hack by @moustakas: add a little jitter to get the set of QSOs
# that are *nearest* in redshift to the desired output redshift.
idx = np.where( (zQSO >= z0[ii]-0.01) & (zQSO < z1[ii]+0.01) )[0]
if len(idx) == 0:
idx = np.array([(np.abs(zQSO-zrand[0])).argmin()])
#pdb.set_trace()
log.debug('Making z=({:g},{:g}) with {:d} input quasars'.format(z0[ii],z1[ii],len(idx)))
# Get PCA stats and random values
for jj,ipca in enumerate(pca_list):
if jj == 0: # Use bounds for PCA0 [avoids negative values]
xmnx = perc(pca_coeff[ipca][idx], per=95)
PCA_rand[jj, :] = rstate.uniform(xmnx[0], xmnx[1], N_perz*ipad)
else:
PCA_mean[jj] = np.mean(pca_coeff[ipca][idx])
PCA_sig[jj] = np.std(pca_coeff[ipca][idx])
# Draws
PCA_rand[jj, :] = rstate.uniform( PCA_mean[jj] - 2*PCA_sig[jj],
PCA_mean[jj] + 2*PCA_sig[jj], N_perz*ipad)
# Generate the templates (ipad*N_perz)
spec = np.dot(eigen.T, PCA_rand)
# Take first good N_perz
# Truncate, MFP, Fill
ngd = 0
nbad = 0
for kk in range(ipad*N_perz):
# Any zero values?
mn = np.min(spec[chkpix, kk])
if mn < 0.:
nbad += 1
continue
# MFP
if z0[ii] > 2.39:
z912 = wave[0:pix912,kk]/lambda_912 - 1.
phys_dist = np.fabs( cosmo.lookback_distance(z912) -
cosmo.lookback_distance(zrand[kk]) ) # Mpc
spec[0:pix912, kk] = spec[0:pix912,kk] * np.exp(-phys_dist.value/mfp[kk])
# Write
final_spec[:, ii*N_perz+ngd] = spec[:,kk]
final_wave[:, ii*N_perz+ngd] = wave[:,kk]
final_z[ii*N_perz+ngd] = zrand[kk]
ngd += 1
if ngd == N_perz:
break
if ngd != N_perz:
print('Did not make enough!')
#pdb.set_trace()
log.warning('Did not make enough qso templates. ngd = {}, N_perz = {}'.format(ngd,N_perz))
# Rebin
if rebin_wave is None:
light = C_LIGHT # [km/s]
velpixsize = 10. # [km/s]
pixsize = velpixsize/light/np.log(10) # [pixel size in log-10 A]
minwave = np.log10(wvmnx[0]) # minimum wavelength [log10-A]
maxwave = np.log10(wvmnx[1]) # maximum wavelength [log10-A]
        r_npix = int(np.round((maxwave-minwave)/pixsize+1))
log_wave = minwave+np.arange(r_npix)*pixsize # constant log-10 spacing
else:
log_wave = np.log10(rebin_wave)
r_npix = len(log_wave)
totN = N_perz * len(z0)
rebin_spec = np.zeros((r_npix, totN))
for ii in range(totN):
# Interpolate (in log space)
rebin_spec[:, ii] = resample_flux(log_wave, np.log10(final_wave[:, ii]), final_spec[:, ii])
#f1d = interp1d(np.log10(final_wave[:,ii]), final_spec[:,ii])
#rebin_spec[:,ii] = f1d(log_wave)
if outfil is None:
return 10.**log_wave, rebin_spec, final_z
# Transpose for consistency
out_spec = np.array(rebin_spec.T, dtype='float32')
# Write
hdu = fits.PrimaryHDU(out_spec)
hdu.header.set('PROJECT', 'DESI QSO TEMPLATES')
hdu.header.set('VERSION', '1.1')
hdu.header.set('OBJTYPE', 'QSO')
hdu.header.set('DISPAXIS', 1, 'dispersion axis')
hdu.header.set('CRPIX1', 1, 'reference pixel number')
hdu.header.set('CRVAL1', minwave, 'reference log10(Ang)')
hdu.header.set('CDELT1', pixsize, 'delta log10(Ang)')
hdu.header.set('LOGLAM', 1, 'log10 spaced wavelengths?')
hdu.header.set('AIRORVAC', 'vac', ' wavelengths in vacuum (vac) or air')
hdu.header.set('VELSCALE', velpixsize, ' pixel size in km/s')
hdu.header.set('WAVEUNIT', 'Angstrom', ' wavelength units')
hdu.header.set('BUNIT', '1e-17 erg/s/cm2/A', ' flux unit')
idval = list(range(totN))
col0 = fits.Column(name=str('TEMPLATEID'),format=str('J'), array=idval)
col1 = fits.Column(name=str('Z'),format=str('E'),array=final_z)
cols = fits.ColDefs([col0, col1])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.header.set('EXTNAME','METADATA')
hdulist = fits.HDUList([hdu, tbhdu])
hdulist.writeto(outfil, overwrite=True)
return final_wave, final_spec, final_z
# ##################### #####################
# ##################### #####################
# Plots DESI templates at a range of z and imag
def chk_desi_qso_templates(infil=None, outfil=None, N_perz=100):
    '''Quick-look plots of the generated DESI QSO templates (one page per redshift window).'''
# Get the templates
if infil is None:
final_wave, final_spec, final_z = desi_qso_templates(N_perz=N_perz, #zmnx=(0.4,0.8),
no_write=True)
sz = final_spec.shape
npage = sz[1] // N_perz
# Imports
import matplotlib as mpl
mpl.rcParams['font.family'] = 'stixgeneral'
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
# Eigen (for wavelengths)
xmnx = (3600., 10000.)
# Start the plot
if outfil != None:
pp = PdfPages(outfil)
# Looping
for ii in range(npage):
#for ii in range(1):
i0 = ii * N_perz
i1 = i0 + N_perz
ymx = 0.
plt.figure(figsize=(8, 5))
plt.clf()
gs = gridspec.GridSpec(1, 1)
# Axis
ax = plt.subplot(gs[0])
#ax = plt.subplot(gs[ii//2,ii%2])
# Labels
ax.set_xlabel('Wavelength')
ax.set_ylabel('Flux')
ax.set_xlim(xmnx)
# Data
#for jj in range(i0,i1):
for jj in range(i0,i0+15):
ax.plot( final_wave[:,jj], final_spec[:,jj],
'-',drawstyle='steps-mid', linewidth=0.5)
ymx = max( ymx, np.max(final_spec[:,jj]) )
ax.set_ylim( (0., ymx*1.05) )
# Label
zmin = np.min(final_z[i0:i1])
zmax = np.max(final_z[i0:i1])
zlbl = 'z=[{:g},{:g}]'.format(zmin,zmax)
ax.text(7000., ymx*0.7, zlbl)
# Layout and save
plt.tight_layout(pad=0.2,h_pad=0.0,w_pad=0.25)
if outfil != None:
pp.savefig()#bbox_inches='tight')
plt.close()
else:
plt.show()
pp.close()
def repackage_coeff(boss_pca_fil=None, sdss_pca_fil=None,
outfil='qso_templates_v2.0.fits'):
""" Repackage the coefficients and redshifts into a single FITS file
:return:
"""
# PCA values
if boss_pca_fil is None:
boss_pca_fil = 'BOSS_DR10Lya_PCA_values_nocut.fits.gz'
hdu = fits.open(boss_pca_fil)
boss_pca_coeff = hdu[1].data
if sdss_pca_fil is None:
sdss_pca_fil = 'SDSS_DR7Lya_PCA_values_nocut.fits.gz'
hdu2 = fits.open(sdss_pca_fil)
sdss_pca_coeff = hdu2[1].data
# Redshifts
boss_cat_fil = os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'
bcat_hdu = fits.open(boss_cat_fil)
t_boss = bcat_hdu[1].data
boss_zQSO = np.array(t_boss['z_pipe'])
# Open the SDSS catalog file
sdss_cat_fil = os.environ.get('SDSSPATH')+'/DR7_QSO/dr7_qso.fits.gz'
scat_hdu = fits.open(sdss_cat_fil)
t_sdss = scat_hdu[1].data
sdss_zQSO = t_sdss['z']
if len(sdss_pca_coeff) != len(sdss_zQSO):
print('Need to finish running the SDSS models!')
sdss_zQSO = sdss_zQSO[0:len(sdss_pca_coeff)]
# Eigen vectors
eigen, eigen_wave = fbq.read_qso_eigen()
# Write
phdu = fits.PrimaryHDU()
bp_hdu = fits.BinTableHDU(boss_pca_coeff)
bp_hdu.name = 'BOSS_PCA'
bz_hdu = fits.ImageHDU(boss_zQSO)
bz_hdu.name = 'BOSS_z'
sp_hdu = fits.BinTableHDU(sdss_pca_coeff)
sp_hdu.name = 'SDSS_PCA'
sz_hdu = fits.ImageHDU(sdss_zQSO)
sz_hdu.name = 'SDSS_z'
e_hdu = fits.ImageHDU(eigen)
e_hdu.name = 'SDSS_EIGEN'
ew_hdu = fits.ImageHDU(eigen_wave)
ew_hdu.name = 'SDSS_EIGEN_WAVE'
hdulist = fits.HDUList([phdu, bp_hdu, bz_hdu, sp_hdu, sz_hdu,
e_hdu, ew_hdu])
hdulist.writeto(outfil, overwrite=True)
print('Wrote {:s}'.format(outfil))
def tst_random_set():
""" Generate a small set of random templates for testing
:return:
"""
final_wave, final_spec, final_z = desi_qso_templates(
outfil='test_random_set.fits', N_perz=100, seed=12345)
## #################################
## #################################
## TESTING
## #################################
if __name__ == '__main__':
# Run
flg_test = 0
#flg_test += 1 # Mean templates with z,imag
#flg_test += 2 # Mean template fig
#flg_test += 2**2 # v1.1 templates
#flg_test += 2**3 # Check v1.1 templates
#flg_test += 2**4 # PCA file
flg_test += 2**5 # Generate a new random set
# Make Mean templates
if (flg_test % 2) == 1:
zimag = [ (2.3, 19.) ]
mean_templ_zi(zimag)
# Mean template fig
if (flg_test % 2**2) >= 2**1:
fig_desi_templ_z_i(outfil='fig_desi_templ_z_i.pdf')
# Make z=2-4 templates; v1.1
if (flg_test % 2**3) >= 2**2:
aa,bb,cc = desi_qso_templates(outfil='DESI_QSO_Templates_v1.1.fits')
# Check z=0.4-4 templates; v1.1
if (flg_test % 2**4) >= 2**3:
chk_desi_qso_templates(outfil='chk_desi_qso_templates.pdf', N_perz=20)
# Re-package PCA info
if (flg_test % 2**5) >= 2**4:
repackage_coeff()
# Test random generation
if (flg_test % 2**6) >= 2**5:
tst_random_set()
# Done
print('All done')
|
bsd-3-clause
|
ryandougherty/mwa-capstone
|
MWA_Tools/setup.py
|
1
|
5669
|
import os
from glob import glob
from numpy.distutils.core import setup, Command
from distutils.command.install import install as DistutilsInstall
from distutils.sysconfig import get_python_lib,EXEC_PREFIX
from subprocess import call
import subprocess, re
from distutils.command.sdist import sdist as _sdist
from numpy.distutils.core import Extension
#check for required modules
try:
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
import ephem
import pytz
import pylab
import psycopg2
except ImportError,e:
print "ERROR! You are missing a dependency!"
print e
raise
v=pylab.matplotlib.__version__.split('.')
if float('.'.join(v[:2])) < 1.1:
print "WARNING: matplotlib version > 1.1 recommended, and you have ",pylab.matplotlib.__version__
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
packages = ['mwapy', 'mwapy.pb', 'mwapy.obssched', 'mwapy.catalog', 'mwapy.obssched.base', 'mwapy.obssched.utils', 'mwapy.eorpy']
#pythonlib=get_python_lib()
pythonlib=EXEC_PREFIX
print pythonlib
VERSION_PY = """
# This file is originally generated from Git information by running 'setup.py
# version'. Distribution tarballs contain a pre-generated copy of this file.
__version__ = '%s'
__date__ = '%s'
"""
def update_version_py():
if not os.path.isdir(".git"):
print "This does not appear to be a Git repository."
return
try:
#p = subprocess.Popen(["git", "log","-1","--pretty=format:%h"],
# stdout=subprocess.PIPE)
p = subprocess.Popen(["git", "describe"],
stdout=subprocess.PIPE)
except EnvironmentError:
print "unable to run git, leaving mwapy/_version.py alone"
return
stdout = p.communicate()[0]
if p.returncode != 0:
print "unable to run git, leaving mwapy/_version.py alone"
return
# we use tags like "python-ecdsa-0.5", so strip the prefix
#assert stdout.startswith("python-ecdsa-")
#ver = stdout[len("python-ecdsa-"):].strip()
ver=stdout.strip()
try:
p = subprocess.Popen(["git", "log","-1","--pretty=format:%ci"],
stdout=subprocess.PIPE)
except EnvironmentError:
print "unable to run git, leaving mwapy/_version.py alone"
return
stdout = p.communicate()[0]
if p.returncode != 0:
print "unable to run git, leaving mwapy/_version.py alone"
return
date=stdout
f = open("mwapy/_version.py", "w")
f.write(VERSION_PY % (ver,date))
f.close()
print "set mwapy/_version.py to '%s' with date '%s'" % (ver,date)
def get_version():
try:
f = open("mwapy/_version.py")
except EnvironmentError:
return None
for line in f.readlines():
mo = re.match("__version__ = '([^']+)'", line)
if mo:
ver = mo.group(1)
return ver
return None
def get_versiondate():
try:
f = open("mwapy/_version.py")
except EnvironmentError:
return None
for line in f.readlines():
mo = re.match("__date__ = '([^']+)'", line)
if mo:
date = mo.group(1)
return date
return None
class Version(Command):
description = "update _version.py from Git repo"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
update_version_py()
print "Version is now", get_version()
class sdist(_sdist):
def run(self):
update_version_py()
# unless we update this, the sdist command will keep using the old
# version
self.distribution.metadata.version = get_version()
return _sdist.run(self)
setup(
name = "mwapy",
#version = "0.0.2",
version=get_version(),
author = "D. Jacobs",
author_email = "[email protected]",
description = ("Set of tools for using and developing the MWA."),
license = "BSD",
cmdclass={ "version": Version},
keywords = "MWA radio",
url = "http://mwa-lfd.haystack.mit.edu",
py_modules = ['mwa_32T_pb','mwaconfig'],
packages=packages,
package_dir={'mwapy':'mwapy','':'configs'},
scripts=glob('scripts/*')+['CONV2UVFITS/corr2uvfits'] + ['build_lfiles/build_lfiles'] + ['build_lfiles/read_mwac'],
ext_modules=[
# Extension('SLALIB',
# glob('mwapy/CONV2UVFITS/SLALIB_C/*.o')
# ),
# Extension('CONV2UVFITS',
# ['mwapy/CONV2UVFITS/corr2uvfits.c',
# 'mwapy/CONV2UVFITS/uvfits.c'],
# libraries=['cfitsio','sla','m'],
# library_dirs = ['mwapy/CONV2UVFITS/SLALIB_C'],
# include_dirs = ['mwapy/CONV2UVFITS/SLALIB_C'],
# )
# extra_compile_args=[' -O -Wall -D_FILE_OFFSET_BITS=64 -L. '])
Extension(name='mwapy.eorpy.igrf11_python',
sources=['mwapy/eorpy/igrf11_python.f'],
#f2py_options=['--f77flags="-ffixed-line-length-none"']
)
],
long_description=read('README'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
# data_files=[('',['configs/mwa.conf'])],
package_data={'mwapy.pb':['*.txt','*.fits'],
'mwapy.catalog':['*.vot'],'mwapy':['../configs/mwa.conf','../configs/*.def','../configs/*.txt']})
|
gpl-2.0
|
sit23/Isca
|
src/extra/python/scripts/calculate_qflux/calculate_qflux.py
|
2
|
16618
|
"""A method for calculating seasonally-varying qfluxes, as described in Russell et al 1985 DOI:10.1016/0377-0265(85)90022-3"""
import numpy as np
import xarray
from xarray import ufuncs as xruf
import time
from scipy import stats
from mpl_toolkits.basemap import shiftgrid
import area_average as aav
import nc_file_io_xarray as io
import matplotlib.pyplot as plt
import os
import pdb
__author__='Stephen Thomson'
def qflux_calc(dataset, model_params, output_file_name, ice_file_name=None, groupby_name='months'):
if groupby_name=='months':
time_varying_ice = ice_mask_calculation(dataset, dataset.land, ice_file_name)
upper_ocean_heat_content(dataset, model_params, time_varying_ice)
net_surf_energy_flux(dataset, model_params)
deep_ocean_heat_content(dataset, model_params)
ocean_transport(dataset, model_params)
output_dict={'manual_grid_option':False, 'is_thd':False, 'num_years':1., 'time_spacing_days':12, 'file_name':output_file_name+'.nc', 'var_name':output_file_name} #Have specified that var name is the same as file name as this is what the fortran assumes.
elif groupby_name=='dayofyear':
time_varying_ice = ice_mask_calculation(dataset, dataset.land, ice_file_name)
upper_ocean_heat_content(dataset, model_params, time_varying_ice, dayofyear_or_months='dayofyear')
net_surf_energy_flux(dataset, model_params)
deep_ocean_heat_content(dataset, model_params)
ocean_transport(dataset, model_params)
output_dict={'manual_grid_option':False, 'is_thd':False, 'num_years':1., 'time_spacing_days':12, 'file_name':output_file_name+'.nc', 'var_name':output_file_name}
elif groupby_name=='all_time':
time_varying_ice = ice_mask_calculation(dataset, dataset.land, ice_file_name, dayofyear_or_months=groupby_name)
upper_ocean_heat_content(dataset, model_params, time_varying_ice, dayofyear_or_months=groupby_name)
net_surf_energy_flux(dataset, model_params, dayofyear_or_months=groupby_name)
deep_ocean_heat_content(dataset, model_params, dayofyear_or_months=groupby_name)
ocean_transport(dataset, model_params, dayofyear_or_months=groupby_name)
regrid_in_time(dataset, groupby_name)
output_dict={'manual_grid_option':False, 'is_thd':False, 'num_years':1., 'time_spacing_days':12, 'file_name':output_file_name+'.nc', 'var_name':output_file_name}
io.output_nc_file(dataset,'masked_ocean_transport', model_params, output_dict)
def time_gradient(data_in, delta_t):
data_out=np.gradient(data_in, delta_t)
return data_out
def ice_mask_calculation(dataset, land_array, ice_file_name, dayofyear_or_months='months'):
try:
ice_climatology=dataset['ice_conc'].groupby(dayofyear_or_months).mean('time').load()
ice_array=ice_climatology.values
ice_idx=ice_array !=0.
ice_array[ice_idx]=1.0
time_varying_ice=True
print('have gotten ice concentration from climatology')
except KeyError:
try:
ice_data_temp = xarray.open_dataset(ice_file_name, decode_times=False)
albedo_constant_value = ice_data_temp['albedo']
albedo_array = albedo_constant_value.values.squeeze()
ice_idx = np.round(albedo_array, decimals=2) == 0.7
ice_array=np.zeros_like(albedo_array)
ice_array[ice_idx] = 1.0
time_varying_ice = False
print('have gotten ice concentration from one month albedo')
except TypeError:
ice_array=np.zeros_like(land_array)
time_varying_ice = False
print('no ice climatology')
land_ice_mask=np.zeros_like(ice_array)
if time_varying_ice:
dataset['ice_mask']=((dayofyear_or_months+'_ax','lat','lon'),ice_array)
for month_tick in range(12):
land_ice_mask_temp=land_array.values+dataset['ice_mask'].values[month_tick,:,:]
two_idx=land_ice_mask_temp>1.
land_ice_mask_temp[two_idx]=1.0
land_ice_mask[month_tick,:,:]=land_ice_mask_temp
dataset['land_ice_mask']=((dayofyear_or_months+'_ax','lat','lon'),land_ice_mask)
else:
dataset['ice_mask']=(('lat','lon'),ice_array)
land_ice_mask_temp=land_array.values+dataset['ice_mask'].values
two_idx=land_ice_mask_temp>1.
land_ice_mask_temp[two_idx]=1.0
land_ice_mask[:,:]=land_ice_mask_temp
dataset['land_ice_mask']=(('lat','lon'),land_ice_mask)
return time_varying_ice
def upper_ocean_heat_content(dataset, model_params, time_varying_ice, dayofyear_or_months='months'):
"""Calculating upper-ocean heat content assuming a constant mixed layer depth, unlike Russel 1985, who have a seasonally-varying mixed layer depth.
    Note that dayofyear_or_months was designed only so that the time derivatives of surface temperature could be calculated
    in a more time-resolved way with dayofyear data, then time-averaged onto months, with months used throughout the rest
    of the code. However, if the all_time option is used, this negates the need for a rate-of-change calculation anyway.
"""
print('doing upper ocean heat content and rate of change calculation')
sst_data=dataset['t_surf'].groupby(dayofyear_or_months).mean('time').load()
if dayofyear_or_months=='months':
dataset['sst_clim']=(('months_ax','lat','lon'),sst_data)
sst_clim=dataset['sst_clim']
else:
dataset['sst_clim_'+dayofyear_or_months]=((dayofyear_or_months+'_ax','lat','lon'),sst_data)
sst_clim=dataset['sst_clim_'+dayofyear_or_months]
dataset['sst_clim']=((dayofyear_or_months+'_ax','lat','lon'),sst_data)
# weighted_sst_data=model_params['ocean_rho']*model_params['ocean_cp']*model_params['ml_depth']*sst_data*(1.0-dataset['land'])
weighted_sst_data=model_params['ocean_rho']*model_params['ocean_cp']*model_params['ml_depth']*sst_clim*(1.0-dataset['land'])
shape1=np.shape(weighted_sst_data)
ny=shape1[1]
nx=shape1[2]
d_weighted_sst_data_dt=np.zeros_like(weighted_sst_data)
if dayofyear_or_months=='dayofyear':
delta_t=model_params['day_length']
elif dayofyear_or_months=='months':
delta_t=model_params['day_length']*30.
if dayofyear_or_months!='all_time':
for x in range(nx):
for y in range(ny):
d_weighted_sst_data_dt[:,y,x]=time_gradient(weighted_sst_data[:,y,x], delta_t)
for time_tick in range(shape1[0]):
if time_varying_ice:
d_weighted_sst_data_dt[time_tick,:,:]=d_weighted_sst_data_dt[time_tick,:,:]*(1.0-dataset['land_ice_mask'].values[time_tick,:,:])
else:
d_weighted_sst_data_dt[time_tick,:,:]=d_weighted_sst_data_dt[time_tick,:,:]*(1.0-dataset['land_ice_mask'].values)
if dayofyear_or_months=='dayofyear':
dataset['d_weighted_sst_data_dt_days']=(('dayofyear_ax','lat','lon'), d_weighted_sst_data_dt)
months_on_dayofyear_ax = dataset.months.groupby('dayofyear').mean('time').values
dataset.coords['months_on_dayofyear_ax']=(('dayofyear_ax'),months_on_dayofyear_ax)
monthly_values = dataset['d_weighted_sst_data_dt_days'].groupby('months_on_dayofyear_ax').mean('dayofyear_ax')
dataset['d_weighted_sst_data_dt_months_from_days']=(('months_ax','lat','lon'), monthly_values)
elif dayofyear_or_months=='months':
dataset['d_weighted_sst_data_dt']=(('months_ax','lat','lon'), d_weighted_sst_data_dt)
elif dayofyear_or_months=='all_time':
dataset['d_weighted_sst_data_dt']=(('all_time_ax','lat','lon'), d_weighted_sst_data_dt)
def net_surf_energy_flux(dataset, model_params, dayofyear_or_months='months'):
"""Calculates the net surface energy flux to be used in q-flux calcuation, but also calcualtes a scaling factor such that the annual average of the area-averaged surface flux is zero."""
print('doing net surf energy flux')
flux_sw_data=dataset['flux_sw'].groupby(dayofyear_or_months).mean('time')
dataset['flux_sw_clim']=((dayofyear_or_months+'_ax','lat','lon'),flux_sw_data)
flux_lw_data=dataset['flux_lw'].groupby(dayofyear_or_months).mean('time')
dataset['flux_lw_clim']=((dayofyear_or_months+'_ax','lat','lon'),flux_lw_data)
flux_t_data=dataset['flux_t'].groupby(dayofyear_or_months).mean('time')
dataset['flux_t_clim']=((dayofyear_or_months+'_ax','lat','lon'),flux_t_data)
flux_lhe_data=dataset['flux_lhe'].groupby(dayofyear_or_months).mean('time')
dataset['flux_lhe_clim']=((dayofyear_or_months+'_ax','lat','lon'),flux_lhe_data)
aav.area_average(dataset, 'flux_sw_clim', model_params, land_ocean_all='ocean_non_ice', axis_in=dayofyear_or_months+'_ax')
aav.area_average(dataset, 'flux_lw_clim', model_params, land_ocean_all='ocean_non_ice', axis_in=dayofyear_or_months+'_ax')
aav.area_average(dataset, 'sigma_sb_sst_clim', model_params, land_ocean_all='ocean_non_ice', axis_in=dayofyear_or_months+'_ax')
aav.area_average(dataset, 'flux_t_clim', model_params, land_ocean_all='ocean_non_ice', axis_in=dayofyear_or_months+'_ax')
aav.area_average(dataset, 'flux_lhe_clim', model_params, land_ocean_all='ocean_non_ice', axis_in=dayofyear_or_months+'_ax')
scaling_factor_old=(((dataset['sigma_sb_sst_clim_area_av_ocean_non_ice']+dataset['flux_t_clim_area_av_ocean_non_ice']+dataset['flux_lhe_clim_area_av_ocean_non_ice']-dataset['flux_lw_clim_area_av_ocean_non_ice'])/dataset['flux_sw_clim_area_av_ocean_non_ice'])).mean(dayofyear_or_months+'_ax')
scaling_factor=(((dataset['sigma_sb_sst_clim_area_av_ocean_non_ice'].mean(dayofyear_or_months+'_ax')+dataset['flux_t_clim_area_av_ocean_non_ice'].mean(dayofyear_or_months+'_ax')+dataset['flux_lhe_clim_area_av_ocean_non_ice'].mean(dayofyear_or_months+'_ax')-dataset['flux_lw_clim_area_av_ocean_non_ice'].mean(dayofyear_or_months+'_ax'))/dataset['flux_sw_clim_area_av_ocean_non_ice'].mean(dayofyear_or_months+'_ax')))
# scaling_factor=1.0
print('using scale factor for SW of '+ str(scaling_factor))
net_surf_energy_fl=(scaling_factor*dataset['flux_sw_clim']+dataset['flux_lw_clim']-(model_params['sigma_sb']*dataset['sst_clim']**4.0)-dataset['flux_t_clim']-dataset['flux_lhe_clim'])*(1.0-dataset['land_ice_mask'])
dataset['net_surf_energy_fl']=((dayofyear_or_months+'_ax','lat','lon'), net_surf_energy_fl)
aav.area_average(dataset, 'net_surf_energy_fl', model_params, land_ocean_all='ocean_non_ice', axis_in=dayofyear_or_months+'_ax')
def deep_ocean_heat_content(dataset, model_params, dayofyear_or_months='months'):
print('doing deep ocean heat content')
aav.area_average(dataset, 'd_weighted_sst_data_dt', model_params, land_ocean_all='ocean_non_ice', axis_in=dayofyear_or_months+'_ax')
d_deep_ocean_dt=(dataset['net_surf_energy_fl_area_av_ocean_non_ice']-dataset['d_weighted_sst_data_dt_area_av_ocean_non_ice'])
dataset['d_deep_ocean_dt']=(dataset['net_surf_energy_fl_area_av_ocean_non_ice'].dims, d_deep_ocean_dt)
def ocean_transport(dataset, model_params, dayofyear_or_months='months'):
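    # Residual closing the mixed-layer heat budget (Russell et al. 1985 style):
    #   q_flux = dH_ml/dt + dH_deep/dt - F_net_surface
    # masked separately over ocean and land/ice points below.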
ocean_transport=dataset['d_weighted_sst_data_dt']+dataset['d_deep_ocean_dt']-dataset['net_surf_energy_fl']
masked_ocean_transport=ocean_transport*(1.0-dataset['land_ice_mask'])
masked_land_transport=ocean_transport*(dataset['land_ice_mask'])
dataset['ocean_transport']=((dayofyear_or_months+'_ax','lat','lon'), ocean_transport)
dataset['masked_ocean_transport']=((dayofyear_or_months+'_ax','lat','lon'), masked_ocean_transport)
dataset['masked_land_transport']=((dayofyear_or_months+'_ax','lat','lon'), masked_land_transport)
def regrid_in_time(dataset, groupby_name):
""" Simple routine to repeat arrays in all_time-averaging situations, so that monthly data still output."""
ocean_heat_flux_shape = dataset['masked_ocean_transport'].shape
if ocean_heat_flux_shape[0]!=12 and groupby_name=='all_time':
dataset_to_repeat = dataset['masked_ocean_transport'].load()
dataset_to_output = np.zeros((12, ocean_heat_flux_shape[1], ocean_heat_flux_shape[2]))
for i in range(12):
dataset_to_output[i,...] = dataset_to_repeat
dataset['masked_ocean_transport'] = (('months_ax','lat','lon'), dataset_to_output)
def check_surface_flux_dims(dataset):
    ''' This surface flux checker is designed to decide if we're using grey rad or not. If we're using grey rad then the definitions
    of flux_sw and flux_lw are different from RRTM's. The script was written to use RRTM output, so it changes variable names etc. to be
    equivalent to RRTM definitions.
'''
flux_dims = dataset['flux_sw'].dims
if 'phalf' in flux_dims:
dataset.rename({'flux_sw':'flux_sw'+'_3d'}, inplace=True)
max_pressure = dataset.phalf.max()
flux_at_bottom_phalf_level = dataset['flux_sw_3d'].sel(phalf=max_pressure)
new_dims = ('time','lat','lon')
dataset['flux_sw'] = (new_dims, flux_at_bottom_phalf_level)
flux_dims_lw = dataset['flux_lw'].dims
if 'phalf' in flux_dims_lw:
dataset.rename({'flux_lw':'flux_lw'+'_3d'}, inplace=True)
try:
# Script assumes flux_lw is the surface lw down (i.e. not a net flux). This is the case with RRTM, but with
# grey radiation 'flux_lw' is the net lw flux in 3D. So we take the lwdn_sfc output from grey rad and rename it
# flux_lw.
dataset['lwdn_sfc']
dataset.rename({'lwdn_sfc':'flux_lw'}, inplace=True)
except:
#If lwdn_sfc is not available, then we re-calculate it from flux_lw by adding back sigma*t_surf**4, then call it flux_lw
print('lwdn_sfc not present when using grey radiation, so re-calculating it from flux_lw.')
max_pressure = dataset.phalf.max()
lwdn_sfc = dataset.flux_lw_3d.sel(phalf=max_pressure) + sigma_sb*dataset.t_surf**4.
new_dims = ('time','lat','lon')
dataset['flux_lw'] = (new_dims, lwdn_sfc)
if __name__ == "__main__":
import nc_file_io_xarray as io
import set_and_get_params as sagp
try:
GFDL_BASE = os.environ['GFDL_BASE']
GFDL_WORK = os.environ['GFDL_WORK']
GFDL_DATA = os.environ['GFDL_DATA']
except Exception as e:
print('Environment variables GFDL_BASE, GFDL_WORK, GFDL_DATA must be set')
exit(0)
input_dir=GFDL_BASE
base_dir=GFDL_DATA
land_file='input/land.nc'
base_exp_name='annual_mean_ice_post_princeton_fixed_sst/' #Folder containing the python script and input files that ran the experiment
exp_name='annual_mean_ice_post_princeton_fixed_sst_1' #Folder within the data directory where the files can be found
# ice_file_name=base_dir+'annual_mean_ice_albedo_change_test_mk2_4320_dt_rad_4/'+'run360/'+'atmos_monthly.nc'
ice_file_name = '/scratch/sit204/data_isca/realistic_continents_fixed_sst_test_experiment_albedo/run0001/atmos_daily.nc'
output_file_name='ami_test_interp' #Proposed name of your output qflux file. Will also be qflux field name in q-flux netcdf file as the fortran assumes file-name = field name. No need to add '.nc' or any file paths in this variable as otherwise they will end up in the field name too. Output file will be stored in the same directory as this script.
start_file=240
end_file=360
land_present=True
use_interpolated_pressure_level_data = False #Conditions the script on whether to expect data on sigma levels (if False) or pressure levels (if True). Script should be insensitive to this choice if both sets of files exist.
#Set time increments of input files (e.g. `monthly` for `atmos_monthly` files.
avg_or_daily='monthly'
#Set the time frequency of output data. Valid options are 'months', 'all_time' or 'dayofyear'.
time_divisions_of_qflux_to_be_calculated='months'
model_params = sagp.model_params_set(input_dir, delta_t=720., ml_depth=20., res=42)
dataset, time_arr, size_list = io.read_data( base_dir,exp_name,start_file,end_file,avg_or_daily,use_interpolated_pressure_level_data)
land_array, topo_array = io.read_land(input_dir,base_exp_name,land_present,use_interpolated_pressure_level_data,size_list,land_file)
dataset['land'] = (('lat','lon'),land_array)
check_surface_flux_dims(dataset)
qflux_calc(dataset, model_params, output_file_name, ice_file_name, groupby_name=time_divisions_of_qflux_to_be_calculated)
|
gpl-3.0
|
klusta-team/klustaviewa
|
klustaviewa/views/tests/test_waveformview.py
|
2
|
1385
|
"""Unit tests for waveform view."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import numpy as np
import numpy.random as rnd
import pandas as pd
from klustaviewa.views.tests.mock_data import (setup, teardown,
nspikes, nclusters, nsamples, nchannels, fetdim)
from kwiklib.dataio import KlustersLoader
from kwiklib.dataio.selection import select
from kwiklib.dataio.tools import check_dtype, check_shape
from klustaviewa import USERPREF
from klustaviewa.views import WaveformView
from klustaviewa.views.tests.utils import show_view, get_data
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_waveformview():
keys = ('waveforms,clusters,cluster_colors,clusters_selected,masks,'
'geometrical_positions'
).split(',')
data = get_data()
kwargs = {k: data[k] for k in keys}
operators = [
lambda self: self.view.toggle_mask(),
lambda self: (self.close()
if USERPREF['test_auto_close'] != False else None),
]
# Show the view.
show_view(WaveformView, operators=operators, **kwargs)
|
bsd-3-clause
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/pandas/stats/ols.py
|
7
|
40448
|
"""
Ordinary least squares regression
"""
# pylint: disable-msg=W0201
# flake8: noqa
from pandas.compat import zip, range, StringIO
from itertools import starmap
from pandas import compat
import numpy as np
from pandas.core.api import DataFrame, Series, isnull
from pandas.core.base import StringMixin
from pandas.types.common import _ensure_float64
from pandas.core.index import MultiIndex
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly
import pandas.stats.common as scom
import pandas.stats.math as math
import pandas.stats.moments as moments
_FP_ERR = 1e-8
class OLS(StringMixin):
"""
Runs a full sample ordinary least squares regression.
Parameters
----------
y : Series
x : Series, DataFrame, dict of Series
intercept : bool
True if you want an intercept.
weights : array-like, optional
1d array of weights. If you supply 1/W then the variables are pre-
multiplied by 1/sqrt(W). If no weights are supplied the default value
        is 1 and WLS results are the same as OLS.
nw_lags : None or int
Number of Newey-West lags.
nw_overlap : boolean, default False
Assume data is overlapping when computing Newey-West estimator
"""
_panel_model = False
def __init__(self, y, x, intercept=True, weights=None, nw_lags=None,
nw_overlap=False):
import warnings
warnings.warn("The pandas.stats.ols module is deprecated and will be "
"removed in a future version. We refer to external packages "
"like statsmodels, see some examples here: "
"http://www.statsmodels.org/stable/regression.html",
FutureWarning, stacklevel=4)
try:
import statsmodels.api as sm
except ImportError:
import scikits.statsmodels.api as sm
self._x_orig = x
self._y_orig = y
self._weights_orig = weights
self._intercept = intercept
self._nw_lags = nw_lags
self._nw_overlap = nw_overlap
(self._y, self._x, self._weights, self._x_filtered,
self._index, self._time_has_obs) = self._prepare_data()
if self._weights is not None:
self._x_trans = self._x.mul(np.sqrt(self._weights), axis=0)
self._y_trans = self._y * np.sqrt(self._weights)
self.sm_ols = sm.WLS(self._y.get_values(),
self._x.get_values(),
weights=self._weights.values).fit()
else:
self._x_trans = self._x
self._y_trans = self._y
self.sm_ols = sm.OLS(self._y.get_values(),
self._x.get_values()).fit()
def _prepare_data(self):
"""
Cleans the input for single OLS.
Parameters
----------
lhs: Series
Dependent variable in the regression.
rhs: dict, whose values are Series, DataFrame, or dict
Explanatory variables of the regression.
Returns
-------
Series, DataFrame
Cleaned lhs and rhs
"""
(filt_lhs, filt_rhs, filt_weights,
pre_filt_rhs, index, valid) = _filter_data(self._y_orig, self._x_orig,
self._weights_orig)
if self._intercept:
filt_rhs['intercept'] = 1.
pre_filt_rhs['intercept'] = 1.
if hasattr(filt_weights, 'to_dense'):
filt_weights = filt_weights.to_dense()
return (filt_lhs, filt_rhs, filt_weights,
pre_filt_rhs, index, valid)
@property
def nobs(self):
return self._nobs
@property
def _nobs(self):
return len(self._y)
@property
def nw_lags(self):
return self._nw_lags
@property
def x(self):
"""Returns the filtered x used in the regression."""
return self._x
@property
def y(self):
"""Returns the filtered y used in the regression."""
return self._y
@cache_readonly
def _beta_raw(self):
"""Runs the regression and returns the beta."""
return self.sm_ols.params
@cache_readonly
def beta(self):
"""Returns the betas in Series form."""
return Series(self._beta_raw, index=self._x.columns)
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
return math.rank(self._x.values)
@cache_readonly
def df(self):
"""Returns the degrees of freedom.
This equals the rank of the X matrix.
"""
return self._df_raw
@cache_readonly
def _df_model_raw(self):
"""Returns the raw model degrees of freedom."""
return self.sm_ols.df_model
@cache_readonly
def df_model(self):
"""Returns the degrees of freedom of the model."""
return self._df_model_raw
@cache_readonly
def _df_resid_raw(self):
"""Returns the raw residual degrees of freedom."""
return self.sm_ols.df_resid
@cache_readonly
def df_resid(self):
"""Returns the degrees of freedom of the residuals."""
return self._df_resid_raw
@cache_readonly
def _f_stat_raw(self):
"""Returns the raw f-stat value."""
from scipy.stats import f
cols = self._x.columns
if self._nw_lags is None:
F = self._r2_raw / (self._r2_raw - self._r2_adj_raw)
q = len(cols)
if 'intercept' in cols:
q -= 1
shape = q, self.df_resid
p_value = 1 - f.cdf(F, shape[0], shape[1])
return F, shape, p_value
k = len(cols)
R = np.eye(k)
r = np.zeros((k, 1))
try:
intercept = cols.get_loc('intercept')
R = np.concatenate((R[0: intercept], R[intercept + 1:]))
r = np.concatenate((r[0: intercept], r[intercept + 1:]))
except KeyError:
# no intercept
pass
return math.calc_F(R, r, self._beta_raw, self._var_beta_raw,
self._nobs, self.df)
@cache_readonly
def f_stat(self):
"""Returns the f-stat value."""
return f_stat_to_dict(self._f_stat_raw)
def f_test(self, hypothesis):
"""Runs the F test, given a joint hypothesis. The hypothesis is
represented by a collection of equations, in the form
A*x_1+B*x_2=C
You must provide the coefficients even if they're 1. No spaces.
The equations can be passed as either a single string or a
list of strings.
Examples
--------
o = ols(...)
o.f_test('1*x1+2*x2=0,1*x3=0')
o.f_test(['1*x1+2*x2=0','1*x3=0'])
"""
x_names = self._x.columns
R = []
r = []
if isinstance(hypothesis, str):
eqs = hypothesis.split(',')
elif isinstance(hypothesis, list):
eqs = hypothesis
else: # pragma: no cover
raise Exception('hypothesis must be either string or list')
for equation in eqs:
row = np.zeros(len(x_names))
lhs, rhs = equation.split('=')
for s in lhs.split('+'):
ss = s.split('*')
coeff = float(ss[0])
x_name = ss[1]
if x_name not in x_names:
raise Exception('no coefficient named %s' % x_name)
idx = x_names.get_loc(x_name)
row[idx] = coeff
rhs = float(rhs)
R.append(row)
r.append(rhs)
R = np.array(R)
q = len(r)
r = np.array(r).reshape(q, 1)
result = math.calc_F(R, r, self._beta_raw, self._var_beta_raw,
self._nobs, self.df)
return f_stat_to_dict(result)
@cache_readonly
def _p_value_raw(self):
"""Returns the raw p values."""
from scipy.stats import t
return 2 * t.sf(np.fabs(self._t_stat_raw),
self._df_resid_raw)
@cache_readonly
def p_value(self):
"""Returns the p values."""
return Series(self._p_value_raw, index=self.beta.index)
@cache_readonly
def _r2_raw(self):
"""Returns the raw r-squared values."""
if self._use_centered_tss:
return 1 - self.sm_ols.ssr / self.sm_ols.centered_tss
else:
return 1 - self.sm_ols.ssr / self.sm_ols.uncentered_tss
@property
def _use_centered_tss(self):
# has_intercept = np.abs(self._resid_raw.sum()) < _FP_ERR
return self._intercept
@cache_readonly
def r2(self):
"""Returns the r-squared values."""
return self._r2_raw
@cache_readonly
def _r2_adj_raw(self):
"""Returns the raw r-squared adjusted values."""
return self.sm_ols.rsquared_adj
@cache_readonly
def r2_adj(self):
"""Returns the r-squared adjusted values."""
return self._r2_adj_raw
@cache_readonly
def _resid_raw(self):
"""Returns the raw residuals."""
return self.sm_ols.resid
@cache_readonly
def resid(self):
"""Returns the residuals."""
return Series(self._resid_raw, index=self._x.index)
@cache_readonly
def _rmse_raw(self):
"""Returns the raw rmse values."""
return np.sqrt(self.sm_ols.mse_resid)
@cache_readonly
def rmse(self):
"""Returns the rmse value."""
return self._rmse_raw
@cache_readonly
def _std_err_raw(self):
"""Returns the raw standard err values."""
return np.sqrt(np.diag(self._var_beta_raw))
@cache_readonly
def std_err(self):
"""Returns the standard err values of the betas."""
return Series(self._std_err_raw, index=self.beta.index)
@cache_readonly
def _t_stat_raw(self):
"""Returns the raw t-stat value."""
return self._beta_raw / self._std_err_raw
@cache_readonly
def t_stat(self):
"""Returns the t-stat values of the betas."""
return Series(self._t_stat_raw, index=self.beta.index)
@cache_readonly
def _var_beta_raw(self):
"""
Returns the raw covariance of beta.
"""
x = self._x.values
y = self._y.values
xx = np.dot(x.T, x)
if self._nw_lags is None:
return math.inv(xx) * (self._rmse_raw ** 2)
else:
resid = y - np.dot(x, self._beta_raw)
m = (x.T * resid).T
xeps = math.newey_west(m, self._nw_lags, self._nobs, self._df_raw,
self._nw_overlap)
xx_inv = math.inv(xx)
return np.dot(xx_inv, np.dot(xeps, xx_inv))
@cache_readonly
def var_beta(self):
"""Returns the variance-covariance matrix of beta."""
return DataFrame(self._var_beta_raw, index=self.beta.index,
columns=self.beta.index)
@cache_readonly
def _y_fitted_raw(self):
"""Returns the raw fitted y values."""
if self._weights is None:
X = self._x_filtered.values
else:
# XXX
return self.sm_ols.fittedvalues
b = self._beta_raw
return np.dot(X, b)
@cache_readonly
def y_fitted(self):
"""Returns the fitted y values. This equals BX."""
if self._weights is None:
index = self._x_filtered.index
orig_index = index
else:
index = self._y.index
orig_index = self._y_orig.index
result = Series(self._y_fitted_raw, index=index)
return result.reindex(orig_index)
@cache_readonly
def _y_predict_raw(self):
"""Returns the raw predicted y values."""
return self._y_fitted_raw
@cache_readonly
def y_predict(self):
"""Returns the predicted y values.
For in-sample, this is same as y_fitted."""
return self.y_fitted
def predict(self, beta=None, x=None, fill_value=None,
fill_method=None, axis=0):
"""
Parameters
----------
beta : Series
x : Series or DataFrame
fill_value : scalar or dict, default None
fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
axis : {0, 1}, default 0
See DataFrame.fillna for more details
Notes
-----
1. If both fill_value and fill_method are None then NaNs are dropped
(this is the default behavior)
2. An intercept will be automatically added to the new_y_values if
the model was fitted using an intercept
Returns
-------
Series of predicted values
"""
if beta is None and x is None:
return self.y_predict
if beta is None:
beta = self.beta
else:
beta = beta.reindex(self.beta.index)
if isnull(beta).any():
raise ValueError('Must supply betas for same variables')
if x is None:
x = self._x
orig_x = x
else:
orig_x = x
if fill_value is None and fill_method is None:
x = x.dropna(how='any')
else:
x = x.fillna(value=fill_value, method=fill_method, axis=axis)
if isinstance(x, Series):
x = DataFrame({'x': x})
if self._intercept:
x['intercept'] = 1.
x = x.reindex(columns=self._x.columns)
rs = np.dot(x.values, beta.values)
return Series(rs, x.index).reindex(orig_x.index)
RESULT_FIELDS = ['r2', 'r2_adj', 'df', 'df_model', 'df_resid', 'rmse',
'f_stat', 'beta', 'std_err', 't_stat', 'p_value', 'nobs']
@cache_readonly
def _results(self):
results = {}
for result in self.RESULT_FIELDS:
results[result] = getattr(self, result)
return results
@cache_readonly
def _coef_table(self):
buf = StringIO()
buf.write('%14s %10s %10s %10s %10s %10s %10s\n' %
('Variable', 'Coef', 'Std Err', 't-stat',
'p-value', 'CI 2.5%', 'CI 97.5%'))
buf.write(scom.banner(''))
coef_template = '\n%14s %10.4f %10.4f %10.2f %10.4f %10.4f %10.4f'
results = self._results
beta = results['beta']
for i, name in enumerate(beta.index):
if i and not (i % 5):
buf.write('\n' + scom.banner(''))
std_err = results['std_err'][name]
CI1 = beta[name] - 1.96 * std_err
CI2 = beta[name] + 1.96 * std_err
t_stat = results['t_stat'][name]
p_value = results['p_value'][name]
line = coef_template % (name,
beta[name], std_err, t_stat, p_value, CI1, CI2)
buf.write(line)
if self.nw_lags is not None:
buf.write('\n')
buf.write('*** The calculations are Newey-West '
'adjusted with lags %5d\n' % self.nw_lags)
return buf.getvalue()
@cache_readonly
def summary_as_matrix(self):
"""Returns the formatted results of the OLS as a DataFrame."""
results = self._results
beta = results['beta']
data = {'beta': results['beta'],
't-stat': results['t_stat'],
'p-value': results['p_value'],
'std err': results['std_err']}
return DataFrame(data, beta.index).T
@cache_readonly
def summary(self):
"""
This returns the formatted result of the OLS computation
"""
template = """
%(bannerTop)s
Formula: Y ~ %(formula)s
Number of Observations: %(nobs)d
Number of Degrees of Freedom: %(df)d
R-squared: %(r2)10.4f
Adj R-squared: %(r2_adj)10.4f
Rmse: %(rmse)10.4f
F-stat %(f_stat_shape)s: %(f_stat)10.4f, p-value: %(f_stat_p_value)10.4f
Degrees of Freedom: model %(df_model)d, resid %(df_resid)d
%(bannerCoef)s
%(coef_table)s
%(bannerEnd)s
"""
coef_table = self._coef_table
results = self._results
f_stat = results['f_stat']
bracketed = ['<%s>' % str(c) for c in results['beta'].index]
formula = StringIO()
formula.write(bracketed[0])
tot = len(bracketed[0])
line = 1
for coef in bracketed[1:]:
tot = tot + len(coef) + 3
if tot // (68 * line):
formula.write('\n' + ' ' * 12)
line += 1
formula.write(' + ' + coef)
params = {
'bannerTop': scom.banner('Summary of Regression Analysis'),
'bannerCoef': scom.banner('Summary of Estimated Coefficients'),
'bannerEnd': scom.banner('End of Summary'),
'formula': formula.getvalue(),
'r2': results['r2'],
'r2_adj': results['r2_adj'],
'nobs': results['nobs'],
'df': results['df'],
'df_model': results['df_model'],
'df_resid': results['df_resid'],
'coef_table': coef_table,
'rmse': results['rmse'],
'f_stat': f_stat['f-stat'],
'f_stat_shape': '(%d, %d)' % (f_stat['DF X'], f_stat['DF Resid']),
'f_stat_p_value': f_stat['p-value'],
}
return template % params
def __unicode__(self):
return self.summary
@cache_readonly
def _time_obs_count(self):
# XXX
return self._time_has_obs.astype(int)
@property
def _total_times(self):
return self._time_has_obs.sum()
class MovingOLS(OLS):
"""
Runs a rolling/expanding simple OLS.
Parameters
----------
y : Series
x : Series, DataFrame, or dict of Series
weights : array-like, optional
1d array of weights. If None, equivalent to an unweighted OLS.
window_type : {'full sample', 'rolling', 'expanding'}
Default expanding
window : int
size of window (for rolling/expanding OLS)
min_periods : int
Threshold of non-null data points to require.
If None, defaults to size of window for window_type='rolling' and 1
otherwise
intercept : bool
True if you want an intercept.
nw_lags : None or int
Number of Newey-West lags.
nw_overlap : boolean, default False
Assume data is overlapping when computing Newey-West estimator
"""
def __init__(self, y, x, weights=None, window_type='expanding',
window=None, min_periods=None, intercept=True,
nw_lags=None, nw_overlap=False):
self._args = dict(intercept=intercept, nw_lags=nw_lags,
nw_overlap=nw_overlap)
OLS.__init__(self, y=y, x=x, weights=weights, **self._args)
self._set_window(window_type, window, min_periods)
def _set_window(self, window_type, window, min_periods):
self._window_type = scom._get_window_type(window_type)
if self._is_rolling:
if window is None:
raise AssertionError("Must specify window.")
if min_periods is None:
min_periods = window
else:
window = len(self._x)
if min_periods is None:
min_periods = 1
self._window = int(window)
self._min_periods = min_periods
#------------------------------------------------------------------------------
# "Public" results
@cache_readonly
def beta(self):
"""Returns the betas in Series/DataFrame form."""
return DataFrame(self._beta_raw,
index=self._result_index,
columns=self._x.columns)
@cache_readonly
def rank(self):
return Series(self._rank_raw, index=self._result_index)
@cache_readonly
def df(self):
"""Returns the degrees of freedom."""
return Series(self._df_raw, index=self._result_index)
@cache_readonly
def df_model(self):
"""Returns the model degrees of freedom."""
return Series(self._df_model_raw, index=self._result_index)
@cache_readonly
def df_resid(self):
"""Returns the residual degrees of freedom."""
return Series(self._df_resid_raw, index=self._result_index)
@cache_readonly
def f_stat(self):
"""Returns the f-stat value."""
f_stat_dicts = dict((date, f_stat_to_dict(f_stat))
for date, f_stat in zip(self.beta.index,
self._f_stat_raw))
return DataFrame(f_stat_dicts).T
def f_test(self, hypothesis):
raise NotImplementedError('must use full sample')
@cache_readonly
def forecast_mean(self):
return Series(self._forecast_mean_raw, index=self._result_index)
@cache_readonly
def forecast_vol(self):
return Series(self._forecast_vol_raw, index=self._result_index)
@cache_readonly
def p_value(self):
"""Returns the p values."""
cols = self.beta.columns
return DataFrame(self._p_value_raw, columns=cols,
index=self._result_index)
@cache_readonly
def r2(self):
"""Returns the r-squared values."""
return Series(self._r2_raw, index=self._result_index)
@cache_readonly
def resid(self):
"""Returns the residuals."""
return Series(self._resid_raw[self._valid_obs_labels],
index=self._result_index)
@cache_readonly
def r2_adj(self):
"""Returns the r-squared adjusted values."""
index = self.r2.index
return Series(self._r2_adj_raw, index=index)
@cache_readonly
def rmse(self):
"""Returns the rmse values."""
return Series(self._rmse_raw, index=self._result_index)
@cache_readonly
def std_err(self):
"""Returns the standard err values."""
return DataFrame(self._std_err_raw, columns=self.beta.columns,
index=self._result_index)
@cache_readonly
def t_stat(self):
"""Returns the t-stat value."""
return DataFrame(self._t_stat_raw, columns=self.beta.columns,
index=self._result_index)
@cache_readonly
def var_beta(self):
"""Returns the covariance of beta."""
result = {}
result_index = self._result_index
for i in range(len(self._var_beta_raw)):
dm = DataFrame(self._var_beta_raw[i], columns=self.beta.columns,
index=self.beta.columns)
result[result_index[i]] = dm
return Panel.from_dict(result, intersect=False)
@cache_readonly
def y_fitted(self):
"""Returns the fitted y values."""
return Series(self._y_fitted_raw[self._valid_obs_labels],
index=self._result_index)
@cache_readonly
def y_predict(self):
"""Returns the predicted y values."""
return Series(self._y_predict_raw[self._valid_obs_labels],
index=self._result_index)
#------------------------------------------------------------------------------
# "raw" attributes, calculations
@property
def _is_rolling(self):
return self._window_type == 'rolling'
@cache_readonly
def _beta_raw(self):
"""Runs the regression and returns the beta."""
beta, indices, mask = self._rolling_ols_call
return beta[indices]
@cache_readonly
def _result_index(self):
return self._index[self._valid_indices]
@property
def _valid_indices(self):
return self._rolling_ols_call[1]
@cache_readonly
def _rolling_ols_call(self):
return self._calc_betas(self._x_trans, self._y_trans)
def _calc_betas(self, x, y):
N = len(self._index)
K = len(self._x.columns)
betas = np.empty((N, K), dtype=float)
betas[:] = np.NaN
valid = self._time_has_obs
enough = self._enough_obs
window = self._window
# Use transformed (demeaned) Y, X variables
cum_xx = self._cum_xx(x)
cum_xy = self._cum_xy(x, y)
for i in range(N):
if not valid[i] or not enough[i]:
continue
xx = cum_xx[i]
xy = cum_xy[i]
if self._is_rolling and i >= window:
xx = xx - cum_xx[i - window]
xy = xy - cum_xy[i - window]
betas[i] = math.solve(xx, xy)
mask = ~np.isnan(betas).any(axis=1)
have_betas = np.arange(N)[mask]
return betas, have_betas, mask
def _rolling_rank(self):
dates = self._index
window = self._window
ranks = np.empty(len(dates), dtype=float)
ranks[:] = np.NaN
for i, date in enumerate(dates):
if self._is_rolling and i >= window:
prior_date = dates[i - window + 1]
else:
prior_date = dates[0]
x_slice = self._x.truncate(before=prior_date, after=date).values
if len(x_slice) == 0:
continue
ranks[i] = math.rank(x_slice)
return ranks
def _cum_xx(self, x):
dates = self._index
K = len(x.columns)
valid = self._time_has_obs
cum_xx = []
slicer = lambda df, dt: df.truncate(dt, dt).values
if not self._panel_model:
_get_index = x.index.get_loc
def slicer(df, dt):
i = _get_index(dt)
return df.values[i:i + 1, :]
last = np.zeros((K, K))
for i, date in enumerate(dates):
if not valid[i]:
cum_xx.append(last)
continue
x_slice = slicer(x, date)
xx = last = last + np.dot(x_slice.T, x_slice)
cum_xx.append(xx)
return cum_xx
def _cum_xy(self, x, y):
dates = self._index
valid = self._time_has_obs
cum_xy = []
x_slicer = lambda df, dt: df.truncate(dt, dt).values
if not self._panel_model:
_get_index = x.index.get_loc
def x_slicer(df, dt):
i = _get_index(dt)
return df.values[i:i + 1]
_y_get_index = y.index.get_loc
_values = y.values
if isinstance(y.index, MultiIndex):
def y_slicer(df, dt):
loc = _y_get_index(dt)
return _values[loc]
else:
def y_slicer(df, dt):
i = _y_get_index(dt)
return _values[i:i + 1]
last = np.zeros(len(x.columns))
for i, date in enumerate(dates):
if not valid[i]:
cum_xy.append(last)
continue
x_slice = x_slicer(x, date)
y_slice = y_slicer(y, date)
xy = last = last + np.dot(x_slice.T, y_slice)
cum_xy.append(xy)
return cum_xy
@cache_readonly
def _rank_raw(self):
rank = self._rolling_rank()
return rank[self._valid_indices]
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
return self._rank_raw
@cache_readonly
def _df_model_raw(self):
"""Returns the raw model degrees of freedom."""
return self._df_raw - 1
@cache_readonly
def _df_resid_raw(self):
"""Returns the raw residual degrees of freedom."""
return self._nobs - self._df_raw
@cache_readonly
def _f_stat_raw(self):
"""Returns the raw f-stat value."""
from scipy.stats import f
items = self.beta.columns
nobs = self._nobs
df = self._df_raw
df_resid = nobs - df
# var_beta has not been newey-west adjusted
if self._nw_lags is None:
F = self._r2_raw / (self._r2_raw - self._r2_adj_raw)
q = len(items)
if 'intercept' in items:
q -= 1
def get_result_simple(Fst, d):
return Fst, (q, d), 1 - f.cdf(Fst, q, d)
# Compute the P-value for each pair
result = starmap(get_result_simple, zip(F, df_resid))
return list(result)
K = len(items)
R = np.eye(K)
r = np.zeros((K, 1))
try:
intercept = items.get_loc('intercept')
R = np.concatenate((R[0: intercept], R[intercept + 1:]))
r = np.concatenate((r[0: intercept], r[intercept + 1:]))
except KeyError:
# no intercept
pass
def get_result(beta, vcov, n, d):
return math.calc_F(R, r, beta, vcov, n, d)
results = starmap(get_result,
zip(self._beta_raw, self._var_beta_raw, nobs, df))
return list(results)
@cache_readonly
def _p_value_raw(self):
"""Returns the raw p values."""
from scipy.stats import t
result = [2 * t.sf(a, b)
for a, b in zip(np.fabs(self._t_stat_raw),
self._df_resid_raw)]
return np.array(result)
@cache_readonly
def _resid_stats(self):
uncentered_sst = []
sst = []
sse = []
Yreg = self._y
Y = self._y_trans
X = self._x_trans
weights = self._weights
dates = self._index
window = self._window
for n, index in enumerate(self._valid_indices):
if self._is_rolling and index >= window:
prior_date = dates[index - window + 1]
else:
prior_date = dates[0]
date = dates[index]
beta = self._beta_raw[n]
X_slice = X.truncate(before=prior_date, after=date).values
Y_slice = _y_converter(Y.truncate(before=prior_date, after=date))
resid = Y_slice - np.dot(X_slice, beta)
if weights is not None:
Y_slice = _y_converter(Yreg.truncate(before=prior_date,
after=date))
weights_slice = weights.truncate(prior_date, date)
demeaned = Y_slice - np.average(Y_slice, weights=weights_slice)
SS_total = (weights_slice * demeaned ** 2).sum()
else:
SS_total = ((Y_slice - Y_slice.mean()) ** 2).sum()
SS_err = (resid ** 2).sum()
SST_uncentered = (Y_slice ** 2).sum()
sse.append(SS_err)
sst.append(SS_total)
uncentered_sst.append(SST_uncentered)
return {
'sse': np.array(sse),
'centered_tss': np.array(sst),
'uncentered_tss': np.array(uncentered_sst),
}
@cache_readonly
def _rmse_raw(self):
"""Returns the raw rmse values."""
return np.sqrt(self._resid_stats['sse'] / self._df_resid_raw)
@cache_readonly
def _r2_raw(self):
rs = self._resid_stats
if self._use_centered_tss:
return 1 - rs['sse'] / rs['centered_tss']
else:
return 1 - rs['sse'] / rs['uncentered_tss']
@cache_readonly
def _r2_adj_raw(self):
"""Returns the raw r-squared adjusted values."""
nobs = self._nobs
factors = (nobs - 1) / (nobs - self._df_raw)
return 1 - (1 - self._r2_raw) * factors
@cache_readonly
def _resid_raw(self):
"""Returns the raw residuals."""
return (self._y.values - self._y_fitted_raw)
@cache_readonly
def _std_err_raw(self):
"""Returns the raw standard err values."""
results = []
for i in range(len(self._var_beta_raw)):
results.append(np.sqrt(np.diag(self._var_beta_raw[i])))
return np.array(results)
@cache_readonly
def _t_stat_raw(self):
"""Returns the raw t-stat value."""
return self._beta_raw / self._std_err_raw
@cache_readonly
def _var_beta_raw(self):
"""Returns the raw covariance of beta."""
x = self._x_trans
y = self._y_trans
dates = self._index
nobs = self._nobs
rmse = self._rmse_raw
beta = self._beta_raw
df = self._df_raw
window = self._window
cum_xx = self._cum_xx(self._x)
results = []
for n, i in enumerate(self._valid_indices):
xx = cum_xx[i]
date = dates[i]
if self._is_rolling and i >= window:
xx = xx - cum_xx[i - window]
prior_date = dates[i - window + 1]
else:
prior_date = dates[0]
x_slice = x.truncate(before=prior_date, after=date)
y_slice = y.truncate(before=prior_date, after=date)
xv = x_slice.values
yv = np.asarray(y_slice)
if self._nw_lags is None:
result = math.inv(xx) * (rmse[n] ** 2)
else:
resid = yv - np.dot(xv, beta[n])
m = (xv.T * resid).T
xeps = math.newey_west(m, self._nw_lags, nobs[n], df[n],
self._nw_overlap)
xx_inv = math.inv(xx)
result = np.dot(xx_inv, np.dot(xeps, xx_inv))
results.append(result)
return np.array(results)
@cache_readonly
def _forecast_mean_raw(self):
"""Returns the raw covariance of beta."""
nobs = self._nobs
window = self._window
# x should be ones
dummy = DataFrame(index=self._y.index)
dummy['y'] = 1
cum_xy = self._cum_xy(dummy, self._y)
results = []
for n, i in enumerate(self._valid_indices):
sumy = cum_xy[i]
if self._is_rolling and i >= window:
sumy = sumy - cum_xy[i - window]
results.append(sumy[0] / nobs[n])
return np.array(results)
@cache_readonly
def _forecast_vol_raw(self):
"""Returns the raw covariance of beta."""
beta = self._beta_raw
window = self._window
dates = self._index
x = self._x
results = []
for n, i in enumerate(self._valid_indices):
date = dates[i]
if self._is_rolling and i >= window:
prior_date = dates[i - window + 1]
else:
prior_date = dates[0]
x_slice = x.truncate(prior_date, date).values
x_demeaned = x_slice - x_slice.mean(0)
x_cov = np.dot(x_demeaned.T, x_demeaned) / (len(x_slice) - 1)
B = beta[n]
result = np.dot(B, np.dot(x_cov, B))
results.append(np.sqrt(result))
return np.array(results)
@cache_readonly
def _y_fitted_raw(self):
"""Returns the raw fitted y values."""
return (self._x.values * self._beta_matrix(lag=0)).sum(1)
@cache_readonly
def _y_predict_raw(self):
"""Returns the raw predicted y values."""
return (self._x.values * self._beta_matrix(lag=1)).sum(1)
@cache_readonly
def _results(self):
results = {}
for result in self.RESULT_FIELDS:
value = getattr(self, result)
if isinstance(value, Series):
value = value[self.beta.index[-1]]
elif isinstance(value, DataFrame):
value = value.xs(self.beta.index[-1])
else: # pragma: no cover
raise Exception('Problem retrieving %s' % result)
results[result] = value
return results
@cache_readonly
def _window_time_obs(self):
window_obs = (Series(self._time_obs_count > 0)
.rolling(self._window, min_periods=1)
.sum()
.values
)
window_obs[np.isnan(window_obs)] = 0
return window_obs.astype(int)
@cache_readonly
def _nobs_raw(self):
if self._is_rolling:
window = self._window
else:
# expanding case
window = len(self._index)
result = Series(self._time_obs_count).rolling(
window, min_periods=1).sum().values
return result.astype(int)
def _beta_matrix(self, lag=0):
if lag < 0:
raise AssertionError("'lag' must be greater than or equal to 0, "
"input was {0}".format(lag))
betas = self._beta_raw
labels = np.arange(len(self._y)) - lag
indexer = self._valid_obs_labels.searchsorted(labels, side='left')
indexer[indexer == len(betas)] = len(betas) - 1
beta_matrix = betas[indexer]
beta_matrix[labels < self._valid_obs_labels[0]] = np.NaN
return beta_matrix
@cache_readonly
def _valid_obs_labels(self):
dates = self._index[self._valid_indices]
return self._y.index.searchsorted(dates)
@cache_readonly
def _nobs(self):
return self._nobs_raw[self._valid_indices]
@property
def nobs(self):
return Series(self._nobs, index=self._result_index)
@cache_readonly
def _enough_obs(self):
# XXX: what's the best way to determine where to start?
return self._nobs_raw >= max(self._min_periods,
len(self._x.columns) + 1)
def _safe_update(d, other):
"""
Combine dictionaries with non-overlapping keys
"""
for k, v in compat.iteritems(other):
if k in d:
raise Exception('Duplicate regressor: %s' % k)
d[k] = v
def _filter_data(lhs, rhs, weights=None):
"""
Cleans the input for single OLS.
Parameters
----------
lhs : Series
Dependent variable in the regression.
rhs : dict, whose values are Series, DataFrame, or dict
Explanatory variables of the regression.
weights : array-like, optional
1d array of weights. If None, equivalent to an unweighted OLS.
Returns
-------
Series, DataFrame
Cleaned lhs and rhs
"""
if not isinstance(lhs, Series):
if len(lhs) != len(rhs):
raise AssertionError("length of lhs must equal length of rhs")
lhs = Series(lhs, index=rhs.index)
rhs = _combine_rhs(rhs)
lhs = DataFrame({'__y__': lhs}, dtype=float)
pre_filt_rhs = rhs.dropna(how='any')
combined = rhs.join(lhs, how='outer')
if weights is not None:
combined['__weights__'] = weights
valid = (combined.count(1) == len(combined.columns)).values
index = combined.index
combined = combined[valid]
if weights is not None:
filt_weights = combined.pop('__weights__')
else:
filt_weights = None
filt_lhs = combined.pop('__y__')
filt_rhs = combined
if hasattr(filt_weights, 'to_dense'):
filt_weights = filt_weights.to_dense()
return (filt_lhs.to_dense(), filt_rhs.to_dense(), filt_weights,
pre_filt_rhs.to_dense(), index, valid)
def _combine_rhs(rhs):
"""
Glue input X variables together while checking for potential
duplicates
"""
series = {}
if isinstance(rhs, Series):
series['x'] = rhs
elif isinstance(rhs, DataFrame):
series = rhs.copy()
elif isinstance(rhs, dict):
for name, value in compat.iteritems(rhs):
if isinstance(value, Series):
_safe_update(series, {name: value})
elif isinstance(value, (dict, DataFrame)):
_safe_update(series, value)
else: # pragma: no cover
raise Exception('Invalid RHS data type: %s' % type(value))
else: # pragma: no cover
raise Exception('Invalid RHS type: %s' % type(rhs))
if not isinstance(series, DataFrame):
series = DataFrame(series, dtype=float)
return series
# A little kludge so we can use this method for both
# MovingOLS and MovingPanelOLS
def _y_converter(y):
y = y.values.squeeze()
if y.ndim == 0: # pragma: no cover
return np.array([y])
else:
return y
def f_stat_to_dict(result):
f_stat, shape, p_value = result
result = {}
result['f-stat'] = f_stat
result['DF X'] = shape[0]
result['DF Resid'] = shape[1]
result['p-value'] = p_value
return result
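# -----------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module). It shows
# how MovingOLS defined above is typically driven; the data is synthetic and
# the block only runs when this file is executed as a script. It assumes the
# rest of the deprecated pandas.stats machinery and its dependencies (numpy,
# scipy) are importable.
if __name__ == '__main__':
    import numpy as np
    from pandas import DataFrame, Series, date_range

    idx = date_range('2000-01-01', periods=100)
    x = DataFrame({'a': np.random.randn(100),
                   'b': np.random.randn(100)}, index=idx)
    y = Series(np.random.randn(100), index=idx)

    # Rolling 20-period regression of y on x (an intercept is added by default)
    model = MovingOLS(y=y, x=x, window_type='rolling', window=20)
    print(model.beta.head())  # one row of betas per valid window
    print(model.r2.head())    # rolling r-squared values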
|
mit
|
galtay/data_sci_ale
|
project_1/geo_var_state_county.py
|
1
|
10041
|
"""
CMS provides data on the geographic variation in the utilization and quality
of health care services,
https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/Medicare-Geographic-Variation/GV_PUF.html
This module handles the State/County level table (more to be added). The class
handles CSV files that are generated from the excel file
(see `convert_geo_var_state_county_to_csv.py`)
State level data includes all 50 states +
- District of Columbia (DC)
- Puerto Rico (PR)
- US Virgin Islands (VI)
- ??? (XX) maybe this means outside the US ?
"""
import os
import time
import pandas
import us_states
VALID_LEVELS = ['national', 'state', 'county']
DEFAULT_CSV_FNAME = './data/County_All_Table_2014.csv'
class CmsGeoVarCountyTable:
"""Class to handle Geographic Variation Public Use Files (State/County)"""
def __init__(self, csv_fname=DEFAULT_CSV_FNAME, verbose=False):
"""Initialize class with a CSV file name, it is read into a DataFrame.
"""
self.verbose = verbose
if self.verbose: print('csv fname: {}'.format(csv_fname))
self.df = pandas.read_csv(csv_fname)
def select_rows(self, level, exclude=None):
"""Return a selection of rows from the total DataFrame.
Args:
level (str): the selection of rows to return. One of
['national', 'state', 'county']
exclude (list of str): a list of either states or counties
                to exclude from the returned DataFrame
Returns:
DataFrame: Selected rows from the full DataFrame
"""
if level not in VALID_LEVELS:
raise ValueError('level must be one of {}'.format(VALID_LEVELS))
if exclude is None:
exclude = []
if level == 'national':
return self._select_national_row()
elif level == 'state':
return self._select_state_rows(exclude=exclude)
elif level == 'county':
return self._select_county_rows(exclude=exclude)
def _select_national_row(self):
"""Select row that represents the national total."""
bmask = self.df['State']=='National'
return self.df[bmask].iloc[0]
def _select_state_rows(self, exclude=None):
"""Select rows that represent individual states.
By default state abbreviations 'XX', 'DC', 'PR', and 'VI' will be
included. The `exclude` keyword can be set to a list of strings
to remove a set of state abbreviations from the return value.
"""
if exclude is None:
exclude = []
bmask = self.df['County'] == 'STATE TOTAL'
bmask = bmask & ~(self.df['State'].isin(exclude))
return self.df[bmask]
def _select_county_rows(self, exclude=None):
"""Select rows that represent individual counties.
By default state abbreviations 'XX', 'DC', 'PR', and 'VI' will be
included. The `exclude` keyword can be set to a list of strings
to remove a set of counties from the return value.
        Note that some states don't have county-level data ('XX', 'PR', 'VI').
In that case the return value will contain the single state total row.
"""
if exclude is None:
exclude = []
# get states that only have state level data
grpd_df = self.df.groupby('State').size()
single_row_states = grpd_df[grpd_df==1].index.tolist()
single_row_states.remove('National')
# is a single row state
bmask1 = self.df['State'].isin(single_row_states)
# is not a single row state and is not a state total
bmask2 = ~bmask1 & (self.df['County'] != 'STATE TOTAL')
# is not a national total
bmask3 = self.df['State'] != 'National'
# is not in county exclude list
bmask4 = ~(self.df['County'].isin(exclude))
bmask = (bmask1 | bmask2) & bmask3 & bmask4
return self.df[bmask]
def return_feature_cols(self):
"""Return a list of column names that could be plausible features
for a learning model. For example we choose 'standardized' and
'per capita' type columns.
"""
#===========================================
# Demographics features
#===========================================
demographics = [
'MA Participation Rate',
'Average Age',
'Percent Female',
'Percent Male',
'Percent Eligible for Medicaid',
'Average HCC Score',
]
#===========================================
# Total Cost features
#===========================================
total_costs = [
'Actual Per Capita Costs',
'Standardized Per Capita Costs',
'Standardized Risk-Adjusted Per Capita Costs',
]
#===========================================
# Service-Level Costs and Utilization
#===========================================
slcu_dict = {}
# All categories have these columns
#===========================================
slcu_categories = [
'IP', 'PAC: LTCH', 'PAC: IRF', 'PAC: SNF', 'PAC: HH',
'Hospice', 'OP', 'FQHC/RHC', 'Outpatient Dialysis Facility',
'ASC', 'E&M', 'Procedures', 'Imaging', 'DME', 'Tests',
'Part B Drugs', 'Ambulance']
slcu_lines = [
'{} Standardized Costs as % of Total Standardized Costs',
'{} Per Capita Standardized Costs',
'{} Per User Standardized Costs',
'% of Beneficiaries Using {}',
]
for cat in slcu_categories:
cols = [line.format(cat) for line in slcu_lines]
slcu_dict[cat] = cols
# Covered Stays
#===========================================
slcu_categories = ['IP', 'PAC: LTCH', 'PAC: IRF', 'PAC: SNF', 'Hospice']
slcu_line = '{} Covered Stays Per 1000 Beneficiaries'
for cat in slcu_categories:
slcu_dict[cat].append(slcu_line.format(cat))
# Covered Days
#===========================================
slcu_categories = ['IP', 'PAC: LTCH', 'PAC: IRF', 'PAC: SNF', 'Hospice']
slcu_line = '{} Covered Days Per 1000 Beneficiaries'
for cat in slcu_categories:
slcu_dict[cat].append(slcu_line.format(cat))
# Visits
#===========================================
slcu_categories = ['PAC: HH', 'OP', 'FQHC/RHC']
slcu_line = '{} Visits Per 1000 Beneficiaries'
for cat in slcu_categories:
slcu_dict[cat].append(slcu_line.format(cat))
# Events
#===========================================
slcu_categories = [
'Outpatient Dialysis Facility', 'ASC', 'E&M',
'Procedures', 'Imaging', 'DME', 'Tests', 'Ambulance']
slcu_line = '{} Events Per 1000 Beneficiaries'
for cat in slcu_categories:
slcu_dict[cat].append(slcu_line.format(cat))
# fix plurality
slcu_dict['Procedures'][-1] = (
slcu_dict['Procedures'][-1].replace('Procedures', 'Procedure'))
slcu_dict['Tests'][-1] = (
slcu_dict['Tests'][-1].replace('Tests', 'Test'))
#===========================================
# Readmissions and ED Visits
#===========================================
readmission_ed = [
'Hospital Readmission Rate',
'Emergency Department Visits per 1000 Beneficiaries',
]
#===========================================
# Combine all into a list of feature columns
#===========================================
feature_cols = []
feature_cols += demographics
feature_cols += total_costs
# Service-Level Costs and Utilization
feature_cols += slcu_dict['IP']
feature_cols += slcu_dict['PAC: LTCH']
feature_cols += slcu_dict['PAC: IRF']
feature_cols += slcu_dict['PAC: SNF']
feature_cols += slcu_dict['PAC: HH']
feature_cols += slcu_dict['Hospice']
feature_cols += slcu_dict['OP']
feature_cols += slcu_dict['FQHC/RHC']
feature_cols += slcu_dict['Outpatient Dialysis Facility']
feature_cols += slcu_dict['ASC']
feature_cols += slcu_dict['E&M']
feature_cols += slcu_dict['Procedures']
feature_cols += slcu_dict['Imaging']
feature_cols += slcu_dict['DME']
feature_cols += slcu_dict['Tests']
feature_cols += slcu_dict['Part B Drugs']
feature_cols += slcu_dict['Ambulance']
# Readmissions and ED visits
feature_cols += readmission_ed
return feature_cols
def check_state_totals_sum_to_national(df_state_totals, ser_national):
"""Check that summing the state level rows recovers the national total"""
state_sum = df_state_totals['Total Actual Costs'].sum()
national_total = ser_national['Total Actual Costs']
print('state sum vs national total')
print('state sum: {}'.format(state_sum))
print('national: {}'.format(national_total))
print(state_sum/national_total)
print()
def check_county_totals_sum_to_national(df_county_totals, ser_national):
"""Check that summing the county level rows recovers the national total"""
county_sum = df_county_totals['Total Actual Costs'].sum()
national_total = ser_national['Total Actual Costs']
print('county sum vs national total')
print('county sum: {}'.format(county_sum))
print('national: {}'.format(national_total))
print(county_sum/national_total)
print()
if __name__ == '__main__':
fname = './data/County_All_Table_2014.csv'
gvct = CmsGeoVarCountyTable(fname, verbose=True)
st = gvct.select_rows('state')
ct = gvct.select_rows('county')
nt = gvct.select_rows('national')
check_state_totals_sum_to_national(st, nt)
check_county_totals_sum_to_national(ct, nt)
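    # Editor's sketch: build a county-level feature matrix from the columns
    # suggested by return_feature_cols(). Intersecting with ct.columns is an
    # assumption, in case some listed columns are missing from a given CSV year.
    feature_cols = gvct.return_feature_cols()
    present_cols = [c for c in feature_cols if c in ct.columns]
    features = ct[present_cols]
    print('feature matrix: {} rows x {} columns'.format(features.shape[0],
                                                        features.shape[1]))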
|
gpl-3.0
|
pypot/scikit-learn
|
examples/decomposition/plot_kernel_pca.py
|
353
|
2011
|
"""
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
|
bsd-3-clause
|
jpautom/scikit-learn
|
examples/decomposition/plot_faces_decomposition.py
|
103
|
4394
|
"""
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition (dimension
reduction) methods from the module :py:mod:`sklearn.decomposition` (see the
documentation chapter :ref:`decompositions`) to the :ref:`olivetti_faces`
dataset.
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
|
bsd-3-clause
|
rayNymous/nupic
|
src/nupic/research/monitor_mixin/monitor_mixin_base.py
|
27
|
5512
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
MonitorMixinBase class used in monitor mixin framework.
"""
import abc
import numpy
from prettytable import PrettyTable
from nupic.research.monitor_mixin.plot import Plot
class MonitorMixinBase(object):
"""
Base class for MonitorMixin. Each subclass will be a mixin for a particular
algorithm.
All arguments, variables, and methods in monitor mixin classes should be
prefixed with "mm" (to avoid collision with the classes they mix in to).
"""
__metaclass__ = abc.ABCMeta
def __init__(self, *args, **kwargs):
"""
Note: If you set the kwarg "mmName", then pretty-printing of traces and
metrics will include the name you specify as a tag before every title.
"""
self.mmName = kwargs.get("mmName")
if "mmName" in kwargs:
del kwargs["mmName"]
super(MonitorMixinBase, self).__init__(*args, **kwargs)
# Mapping from key (string) => trace (Trace)
self._mmTraces = None
self._mmData = None
self.mmClearHistory()
def mmClearHistory(self):
"""
Clears the stored history.
"""
self._mmTraces = {}
self._mmData = {}
@staticmethod
def mmPrettyPrintTraces(traces, breakOnResets=None):
"""
Returns pretty-printed table of traces.
@param traces (list) Traces to print in table
@param breakOnResets (BoolsTrace) Trace of resets to break table on
@return (string) Pretty-printed table of traces.
"""
assert len(traces) > 0, "No traces found"
table = PrettyTable(["#"] + [trace.prettyPrintTitle() for trace in traces])
for i in xrange(len(traces[0].data)):
if breakOnResets and breakOnResets.data[i]:
table.add_row(["<reset>"] * (len(traces) + 1))
table.add_row([i] +
[trace.prettyPrintDatum(trace.data[i]) for trace in traces])
return table.get_string().encode("utf-8")
@staticmethod
def mmPrettyPrintMetrics(metrics, sigFigs=5):
"""
Returns pretty-printed table of metrics.
    @param metrics (list) Metrics to print in table
@param sigFigs (int) Number of significant figures to print
@return (string) Pretty-printed table of metrics.
"""
assert len(metrics) > 0, "No metrics found"
table = PrettyTable(["Metric", "mean", "standard deviation",
"min", "max", "sum", ])
for metric in metrics:
table.add_row([metric.prettyPrintTitle()] + metric.getStats())
return table.get_string().encode("utf-8")
def mmGetDefaultTraces(self, verbosity=1):
"""
Returns list of default traces. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default traces
"""
return []
def mmGetDefaultMetrics(self, verbosity=1):
"""
Returns list of default metrics. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default metrics
"""
return []
def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title="",
showReset=False, resetShading=0.25):
"""
Returns plot of the cell activity. Note that if many timesteps of
activities are input, matplotlib's image interpolation may omit activities
(columns in the image).
@param cellTrace (list) a temporally ordered list of sets of cell
activities
@param cellCount (int) number of cells in the space being rendered
@param activityType (string) type of cell activity being displayed
@param title (string) an optional title for the figure
@param showReset (bool) if true, the first set of cell activities
after a reset will have a grayscale background
@param resetShading (float) applicable if showReset is true, specifies the
intensity of the reset background with 0.0
being white and 1.0 being black
@return (Plot) plot
"""
plot = Plot(self, title)
resetTrace = self.mmGetTraceResets().data
data = numpy.zeros((cellCount, 1))
for i in xrange(len(cellTrace)):
# Set up a "background" vector that is shaded or blank
if showReset and resetTrace[i]:
activity = numpy.ones((cellCount, 1)) * resetShading
else:
activity = numpy.zeros((cellCount, 1))
activeIndices = cellTrace[i]
activity[list(activeIndices)] = 1
data = numpy.concatenate((data, activity), 1)
plot.add2DArray(data, xlabel="Time", ylabel=activityType, name=title)
return plot
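# ----------------------------------------------------------------------
# Minimal sketch (editor's addition, not part of NuPIC): how a monitor mixin
# built on MonitorMixinBase is combined with an algorithm class via multiple
# inheritance. The classes below are hypothetical stand-ins; real mixins also
# record traces and define metrics for the algorithm they wrap.
if __name__ == "__main__":
  class _DummyAlgorithm(object):
    """Stand-in for an algorithm class to be monitored."""
    def compute(self, value):
      return value

  class _DummyAlgorithmMonitorMixin(MonitorMixinBase):
    """Mixin for _DummyAlgorithm; note the 'mm' prefix convention."""
    def mmGetDefaultTraces(self, verbosity=1):
      return []

  class MonitoredDummyAlgorithm(_DummyAlgorithmMonitorMixin, _DummyAlgorithm):
    pass

  monitored = MonitoredDummyAlgorithm(mmName="dummy")
  monitored.compute(42)
  print(monitored.mmName)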
|
agpl-3.0
|
apahl/cellpainting
|
cellpainting/processing.py
|
1
|
55494
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
##########
Processing
##########
*Created on Thu Jun 1 14:15 2017 by A. Pahl*
Processing results from the CellPainting Assay in the Jupyter notebook.
This module provides the DataSet class and its methods.
Additional functions in this module act on pandas DataFrames."""
import time
import glob
import os.path as op
from collections import Counter
import xml.etree.ElementTree as ET
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from rdkit.Chem import AllChem as Chem
from rdkit import DataStructs
from IPython.core.display import HTML
from . import tools as cpt
from .config import ACT_PROF_PARAMETERS
from .config import LIMIT_SIMILARITY_L, LIMIT_CELL_COUNT_L, LIMIT_ACTIVITY_L
try:
from misc_tools import apl_tools
AP_TOOLS = True
#: Library version
VERSION = apl_tools.get_commit(__file__)
# I use this to keep track of the library versions I use in my project notebooks
print("{:45s} (commit: {})".format(__name__, VERSION))
except ImportError:
AP_TOOLS = False
print("{:45s} ({})".format(__name__, time.strftime("%y%m%d-%H:%M", time.localtime(op.getmtime(__file__)))))
try:
from . import resource_paths as cprp
except ImportError:
from . import resource_paths_templ as cprp
print("* Resource paths not found, stub loaded.")
print(" Automatic loading of resources will not work,")
print(" please have a look at resource_paths_templ.py")
try:
import holoviews as hv
hv.extension("bokeh")
HOLOVIEWS = True
except ImportError:
HOLOVIEWS = False
print("* holoviews could not be import. heat_hv is not available.")
FINAL_PARAMETERS = ['Metadata_Plate', 'Metadata_Well', 'plateColumn', 'plateRow',
"Compound_Id", 'Container_Id', "Well_Id", "Producer", "Pure_Flag", "Toxic",
"Rel_Cell_Count", "Known_Act", "Trivial_Name", 'WellType', 'Conc_uM',
"Activity", "Act_Profile", "Plate", "Smiles"]
DROP_FROM_NUMBERS = ['plateColumn', 'plateRow', 'Conc_uM', "Compound_Id"]
DROP_GLOBAL = ["PathName_CellOutlines", "URL_CellOutlines", 'FileName_CellOutlines',
'ImageNumber', 'Metadata_Site', 'Metadata_Site_1', 'Metadata_Site_2']
QUANT = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
DEBUG = False
def debug_print(txt, val):
if DEBUG:
txt = txt + ":"
print("DEBUG {:20s}".format(txt), val)
class DataSet():
def __init__(self, log=True):
self.data = pd.DataFrame()
self.fields = {"plateColumn": "Metadata_Plate",
"WellType": "WellType", "ControlWell": "Control", "CompoundWell": "Compound"}
self.log = log
def __getitem__(self, item):
res = self.data[item]
if isinstance(res, pd.DataFrame):
result = DataSet()
result.data = res
result.print_log("subset")
else:
result = res
return result
# def __getattr__(self, name):
# """Try to call undefined methods on the underlying pandas DataFrame."""
# def method(*args, **kwargs):
# res = getattr(self.data, name)(*args, **kwargs)
# if isinstance(res, pd.DataFrame):
# result = DataSet()
# result.data = res
# result.print_log(name)
# else:
# result = res
# return result
# return method
def __getattr__(self, name):
"""Try to call undefined methods on the underlying pandas DataFrame."""
if hasattr(self.data, name):
def method(*args, **kwargs):
res = getattr(self.data, name)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
result = self.new()
result.data = res
print_log(result.data, name)
else:
result = res
return result
return method
else:
raise AttributeError
def new(self):
result = DataSet()
# self._pass_properties(result)
return result
def show(self):
parameters = [k for k in FINAL_PARAMETERS if k in self.data]
print("Shape: ", self.shape)
print("Parameters:", parameters)
return HTML(self.data[parameters]._repr_html_())
def head(self, n=5):
parameters = [k for k in FINAL_PARAMETERS if k in self.data]
res = self.data[parameters].head(n)
result = DataSet()
result.data = res
result.print_log("head")
return result
def drop_cols(self, cols, inplace=False):
"""Drops the list of columns from the DataFrame.
Listed columns that are not present in the DataFrame are simply ignored
(no error is thrown)."""
if inplace:
drop_cols(self.data, cols, inplace=True)
self.print_log("drop cols (inplace)")
else:
result = DataSet()
result.data = drop_cols(self.data, cols, inplace=False)
result.print_log("drop cols")
return result
def keep_cols(self, cols, inplace=False):
if inplace:
self.data = self.data[cols]
self.print_log("keep cols (inplace)")
else:
result = DataSet()
result.data = self.data[cols]
result.print_log("keep cols")
return result
def print_log(self, component, add_info=""):
if self.log:
print_log(self.data, component, add_info)
def load(self, fn, sep="\t"):
"""Read one or multiple result files and concatenate them into one dataset.
`fn` is a single filename (string) or a list of filenames."""
self.data = load(fn, sep=sep).data
self.print_log("load data")
def write_csv(self, fn, parameters=None, sep="\t"):
result = self.data.copy()
if isinstance(parameters, list):
result = result[parameters]
result.to_csv(fn, sep=sep, index=False)
def write_pkl(self, fn):
self.data.to_pickle(fn)
def write_parameters(self, fn="parameters.txt"):
parameters = sorted(self.measurements)
with open("parameters.txt", "w") as f:
f.write('"')
f.write('",\n"'.join(parameters))
f.write('"')
print(len(parameters), "parameters written.")
def describe(self, times_mad=3.0):
df = numeric_parameters(self.data)
stats = pd.DataFrame()
stats["Min"] = df.min()
stats["Max"] = df.max()
stats["Median"] = df.median()
stats["MAD"] = df.mad()
stats["Outliers"] = df[(((df - df.median()).abs() - times_mad * df.mad()) > 0)].count()
print(self.shape)
return stats
def well_type_from_position(self):
"""Assign the WellType from the position on the plate.
        Controls are in columns 11 and 12."""
result = DataSet(log=self.log)
result.data = well_type_from_position(self.data)
result.print_log("well type from pos")
return result
def well_from_position(self, well_name="Metadata_Well",
row_name="plateRow", col_name="plateColumn"):
"""Assign Metadata_Well from plateRow, plateColumn"""
result = DataSet(log=self.log)
result.data = well_from_position(self.data, well_name=well_name,
row_name=row_name, col_name=col_name)
result.print_log("well from pos")
return result
def position_from_well(self, well_name="Metadata_Well",
row_name="plateRow", col_name="plateColumn"):
"""Generate plateRow and plateColumn from Metatadata_Well"""
result = DataSet(log=self.log)
result.data = position_from_well(self.data, well_name=well_name,
row_name=row_name, col_name=col_name)
result.print_log("pos from well")
return result
def join_layout_384(self, layout_fn, on="Address_384"):
result = DataSet(log=self.log)
result.data = join_layout_384(self.data, layout_fn, on=on)
result.print_log("join layout 384")
return result
def join_layout_1536(self, plate, quadrant, on="Address_384", how="inner"):
"""Cell Painting is always run in 384er plates.
COMAS standard screening plates are format 1536.
With this function, the 1536-to-384 reformatting file
with the smiles added by join_smiles_to_layout_1536()
can be used directly to join the layout to the individual 384er plates."""
result = DataSet(log=self.log)
result.data = join_layout_1536(self.data, plate, quadrant, on=on, how=how)
result.print_log("join layout 1536")
return result
def numeric_parameters(self):
result = DataSet()
result.data = numeric_parameters(self.data)
return result
def flag_toxic(self, cutoff=LIMIT_CELL_COUNT_L / 100):
"""Flag data rows of toxic compounds"""
result = DataSet()
result.data = flag_toxic(self.data, cutoff=cutoff)
flagged = result.data["Toxic"].sum()
result.print_log("flag toxic", "{:3d} flagged".format(flagged))
return result
def remove_toxic(self, cutoff=LIMIT_CELL_COUNT_L / 100):
"""Remove data rows of toxic compounds"""
result = DataSet()
toxic = DataSet()
result.data, toxic.data = remove_toxic(self.data, cutoff=cutoff)
result.print_log("remove toxic", "{:3d} removed".format(toxic.shape[0]))
return result, toxic
def remove_impure(self, strict=False, reset_index=True):
"""Remove entries with `Pure_Flag == "Fail"`"""
result = DataSet()
flagged = DataSet()
result.data, flagged.data = remove_impure(self.data)
result.print_log("remove impure", "{:3d} removed".format(flagged.shape[0]))
return result, flagged
def remove_outliers(self, times_dev=3.0, group_by=None, method="median"):
"""Returns the filtered dataframe as well as the outliers.
        method can be `median` or `mean`."""
result = DataSet()
outliers = DataSet()
result.data, outliers.data = remove_outliers(self.data, times_dev=times_dev,
group_by=group_by, method=method)
result.print_log("remove outliers", "{:3d} removed".format(outliers.shape[0]))
return result, outliers
def remove_skipped_echo_direct_transfer(self, fn):
"""Remove wells that were reported as skipped in the Echo protocol (xml).
This functions works with Echo direct transfer protocols.
Function supports using wildcards in the filename, the first file will be used.
Returns a new dataframe without the skipped wells."""
result = DataSet()
result.data, skipped = remove_skipped_echo_direct_transfer(self.data, fn=fn)
skipped_str = "(" + ", ".join(skipped) + ")"
result.print_log("remove skipped", "{:3d} skipped {}".format(self.shape[0] - result.shape[0],
skipped_str))
return result
def drop_dups(self, cpd_id="Compound_Id"):
"""Drop duplicate Compound_Ids"""
result = DataSet()
result.data = self.data.drop_duplicates(cpd_id)
result.print_log("drop dups")
return result
def group_on_well(self, group_by=FINAL_PARAMETERS):
"""Group results on well level."""
result = DataSet()
result.data = group_on_well(self.data, group_by=group_by)
result.print_log("group on well")
return result
def join_batch_data(self, df_data=None, how="left", fillna="n.d."):
"""Join data by Batch_Id."""
result = DataSet()
result.data = join_batch_data(self.data, df_data=df_data, how=how, fillna=fillna)
result.print_log("join batch data")
return result
def join_container_data(self, df_data=None, how="left", fillna=""):
"""Join data by Container_Id."""
result = DataSet()
result.data = join_container_data(self.data, df_data=df_data, how=how, fillna=fillna)
result.print_log("join cntnr data")
return result
def join_container(self, cont_data=None, how="inner"):
result = DataSet(log=self.log)
result.data = join_container(self.data, cont_data=cont_data, how=how)
result.print_log("join container")
return result
def join_smiles(self, df_smiles=None, how="left"):
"""Join Smiles from Compound_Id."""
result = DataSet()
result.data = join_smiles(self.data, df_smiles=df_smiles, how=how)
result.print_log("join smiles")
return result
def join_annotations(self):
"""Join Annotations from Compound_Id."""
result = DataSet()
result.data = join_annotations(self.data)
result.print_log("join annotations")
return result
def add_dmso(self):
"""Add DMSO to references."""
result = DataSet()
result.data = add_dmso(self.data)
result.print_log("add DMSO")
return result
def poc(self, group_by=None, well_type="WellType", control_name="Control"):
"""Normalize the data set to Percent-Of-Control per group (e.g. per plate)
based on the median of the controls.
Parameters:
group_by (string or None): optional column by which the calculation should be grouped,
e.g. the column with plate name."""
result = DataSet()
result.data = poc(self.data, group_by=group_by)
self.print_log("POC")
return result
def activity_profile(self, mad_mult=3.5, parameters=ACT_PROF_PARAMETERS, only_final=True):
"""Generates the `Act_Profile` column.
The byte is set when the parameter's value is greater (or smaller)
        than parameter_ctrl.median() + (or -) `mad_mult` * parameter.mad().
If a list of parameters is given, then the activity profile will be calculated
for these parameters.
If `only_final` == `True`, then only the parameters listed in `FINAL_PARAMETERS`
are kept in the output_table.
Returns a new Pandas DataFrame."""
result = DataSet()
result.data = activity_profile(self.data, mad_mult=mad_mult, parameters=parameters,
only_final=only_final)
result.print_log("activity profile")
return result
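    # Editor's note (a sketch, not the actual implementation, which lives in the
    # module-level activity_profile() function): per parameter column p, with
    # ctrls being the control wells, the MAD rule described above amounts to
    #     upper = ctrls[p].median() + mad_mult * ctrls[p].mad()
    #     lower = ctrls[p].median() - mad_mult * ctrls[p].mad()
    #     increased = df[p] > upper
    #     decreased = df[p] < lower
    # and the flags are then encoded per parameter into the Act_Profile string.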
def relevant_parameters(self, ctrls_std_rel_min=0.001,
ctrls_std_rel_max=0.10):
result = DataSet()
result.data = relevant_parameters(self.data, ctrls_std_rel_min=ctrls_std_rel_min,
ctrls_std_rel_max=ctrls_std_rel_max)
num_parm = len(result.measurements)
result.print_log("relevant parameters", "{:.3f}/{:.3f}/{:4d}"
.format(ctrls_std_rel_min, ctrls_std_rel_max, num_parm))
return result
def correlation_filter(self, cutoff=0.9, method="pearson"):
"""The correlation removes all highly correlated columns from the dataframe.
The function was implemented according to the description of the corresponding
KNIME component.
Parameters:
cutoff (float): correlation cutoff
method (string): "pearson", "kendall", "spearman" (very slow)
Returns a new DataFrame with only the non-correlated columns"""
result = DataSet()
result.data, iterations = correlation_filter(self.data, cutoff=cutoff, method=method)
num_parm = len(result.measurements)
result.print_log("correl. filter (mad)", "{:3d} iterations/{:4d}"
.format(iterations, num_parm))
return result
def correlation_filter_std(self, cutoff=0.9, method="pearson"):
"""The correlation removes all highly correlated columns from the dataframe.
The function was implemented according to the description of the corresponding
KNIME component.
Parameters:
cutoff (float): correlation cutoff
method (string): "pearson", "kendall", "spearman" (very slow)
Returns a new DataFrame with only the non-correlated columns"""
result = DataSet()
result.data, iterations = correlation_filter_std(self.data, cutoff=cutoff, method=method)
num_parm = len(result.measurements)
result.print_log("correl. filter (std)", "{:3d} iterations/{:4d}"
.format(iterations, num_parm))
return result
def id_filter(self, cpd_ids, id_col="Compound_Id", reset_index=True, sort_by_input=False):
result = self.new()
result.data = id_filter(self.data, cpd_ids, id_col=id_col, reset_index=reset_index,
sort_by_input=sort_by_input)
return result
def add_act_profile_for_control(self, parameters=ACT_PROF_PARAMETERS):
# Compound_Id DMSO: 245754
control = {"Compound_Id": 245754, "Trivial_Name": "Control", "Activity": 0,
"Act_Profile": "".join(["1"] * len(parameters))}
        ck = list(control.keys())
        for k in ck:
            if k not in self.data.keys():
                control.pop(k)
        tmp = pd.DataFrame([control])
result = DataSet()
        result.data = pd.concat([self.data, tmp])
return result
def update_similar_refs(self, mode="cpd", write=True):
"""Find similar compounds in references and update the export file.
The export file of the dict object is in pkl format. In addition,
a tsv file (or maybe JSON?) is written for use in PPilot.
        This method does not return anything, it just writes the result to file."""
rem = "" if write else "write is off"
update_similar_refs(self.data, mode=mode, write=write)
self.print_log("update similar", rem)
def update_datastore(self, mode="cpd", write=True):
"""Update the DataStore with the current DataFrame."""
update_datastore(self.data, mode=mode, write=write)
def find_similar(self, act_profile, cutoff=0.5, max_num=5):
"""Filter the dataframe for activity profiles similar to the given one.
`cutoff` gives the similarity threshold, default is 0.5."""
result = DataSet()
result.data = find_similar(self.data, act_profile=act_profile, cutoff=cutoff, max_num=max_num)
result.print_log("find similar")
return result
def well_id_similarity(self, well_id1, well_id2):
"""Calculate the similarity of the activity profiles from two compounds
(identified by `Compound_Id`). Returns value between 0 .. 1"""
return well_id_similarity(self.data, well_id1, self.data, well_id2)
def count_active_parameters_occurrences(self, act_prof="Act_Profile",
parameters=ACT_PROF_PARAMETERS):
"""Counts the number of times each parameter has been active in the dataset."""
return count_active_parameters_occurrences(self.data, act_prof=act_prof,
                                                   parameters=parameters)
def heat_mpl(self, id_prop="Compound_Id"):
heat_mpl(self.data, id_prop)
@property
def shape(self):
return self.data.shape
@property
def metadata(self):
"""Returns a list of the those parameters in the DataFrame that are NOT CellProfiler measurements."""
return metadata(self.data)
@property
def measurements(self):
"""Returns a list of the CellProfiler parameters that are in the DataFrame."""
return measurements(self.data)
def load(fn, sep="\t"):
"""Read one or multiple result files and concatenate them into one dataset.
`fn` is a single filename (string) or a list of filenames."""
result = DataSet()
if isinstance(fn, list):
result.data = pd.concat((pd.read_csv(f, sep=sep) for f in fn))
else:
result.data = pd.read_csv(fn, sep=sep)
drop = [d for d in DROP_GLOBAL if d in result.data.keys()]
result.data.drop(drop, axis=1, inplace=True)
result.print_log("load dataset")
return result
def load_pkl(fn):
result = DataSet()
result.data = pd.read_pickle(fn)
result.print_log("load pickle")
return result
def print_log(df, component, add_info=""):
component = component + ":"
if len(add_info) > 0:
add_info = " ({})".format(add_info)
print("* {:22s} ({:5d} | {:4d}){}".format(component, df.shape[0], df.shape[1], add_info))
def read_smiles_file(fn, props=['Compound_Id', "Smiles"]):
"""Read in the file with the Compound_Ids and the Smiles.
Return a DataFrame for fast access."""
result = pd.read_csv(fn, sep="\t")
result = result[props]
result = result.apply(pd.to_numeric, errors='ignore')
return result
def clear_resources():
try:
del SMILES
print("* deleted resource: SMILES")
except NameError:
pass
try:
del ANNOTATIONS
print("* deleted resource: ANNOTATIONS")
except NameError:
pass
try:
del REFERENCES
print("* deleted resource: REFERENCES")
except NameError:
pass
try:
del SIM_REFS
print("* deleted resource: SIM_REFS")
except NameError:
pass
try:
del DATASTORE
print("* deleted resource: DATASTORE")
except NameError:
pass
try:
del LAYOUTS
print("* deleted resource: LAYOUTS")
except NameError:
pass
def load_resource(resource, mode="cpd"):
"""Available resources: SMILES, ANNOTATIONS, SIM_REFS, REFERENCES,
CONTAINER, CONTAINER_DATA, BATCH_DATA, DATASTORE, LAYOUTS"""
res = resource.lower()
glbls = globals()
if "smi" in res:
if "SMILES" not in glbls:
# except NameError:
global SMILES
print("- loading resource: (SMILES)")
SMILES = read_smiles_file(cprp.smiles_path,
props=cprp.smiles_cols)
SMILES = SMILES.apply(pd.to_numeric, errors='ignore')
elif "annot" in res:
if "ANNOTATIONS" not in glbls:
global ANNOTATIONS
print("- loading resource: (ANNOTATIONS)")
ANNOTATIONS = pd.read_csv(cprp.annotations_path, sep="\t")
ANNOTATIONS = ANNOTATIONS.apply(pd.to_numeric, errors='ignore')
elif "sim" in res:
if "SIM_REFS" not in glbls:
global SIM_REFS
print("- loading resource: (SIM_REFS)")
if "ext" in mode.lower():
srp = cprp.sim_refs_ext_path
else:
srp = cprp.sim_refs_path
try:
SIM_REFS = pd.read_csv(srp, sep="\t")
except FileNotFoundError:
print(" * SIM_REFS not found, creating new one.")
SIM_REFS = pd.DataFrame()
elif "ref" in res:
if "REFERENCES" not in glbls:
global REFERENCES
print("- loading resource: (REFERENCES)")
REFERENCES = pd.read_csv(cprp.references_path, sep="\t") # .fillna("")
elif "cont" in res:
if "CONTAINER" not in glbls:
global CONTAINER
print("- loading resource: (CONTAINER)")
CONTAINER = pd.read_csv(cprp.container_path, sep="\t")
            if len(cprp.container_cols) > 0:
CONTAINER = CONTAINER[cprp.container_cols]
CONTAINER = CONTAINER.apply(pd.to_numeric, errors='ignore')
elif "container_d" in res:
if "CONTAINER_DATA" not in glbls:
global CONTAINER_DATA
print("- loading resource: (CONTAINER)")
CONTAINER_DATA = pd.read_csv(cprp.container_data_path, sep="\t")
if len(cprp.container_data_cols) > 0:
CONTAINER_DATA = CONTAINER_DATA[cprp.container_data_cols]
CONTAINER_DATA = CONTAINER_DATA.apply(pd.to_numeric, errors='ignore')
elif "batch_d" in res:
if "BATCH_DATA" not in glbls:
global BATCH_DATA
print("- loading resource: (BATCH_DATA)")
BATCH_DATA = pd.read_csv(cprp.batch_data_path, sep="\t")
if len(cprp.batch_data_cols) > 0:
BATCH_DATA = BATCH_DATA[cprp.batch_data_cols]
BATCH_DATA = BATCH_DATA.apply(pd.to_numeric, errors='ignore')
elif "datast" in res:
if "DATASTORE" not in glbls:
global DATASTORE
print("- loading resource: (DATASTORE)")
try:
DATASTORE = pd.read_csv(cprp.datastore_path, sep="\t")
except FileNotFoundError:
print(" * DATASTORE not found, creating new one.")
DATASTORE = pd.DataFrame()
elif "layout" in res:
if "LAYOUTS" not in glbls:
global LAYOUTS
print("- loading resource: (LAYOUTS)")
LAYOUTS = pd.read_csv(cprp.layouts_path, sep="\t")
else:
raise FileNotFoundError("# unknown resource: {}".format(resource))
def well_type_from_position(df):
"""Assign the WellType from the position on the plate.
    Controls are in columns 11 and 12."""
result = df.copy()
result["WellType"] = "Compound"
result["WellType"][(result["plateColumn"] == 11) | (result["plateColumn"] == 12)] = "Control"
return result
def drop_cols(df, cols, inplace=False):
"""Drops the list of columns from the DataFrame.
Listed columns that are not present in the DataFrame are simply ignored
(no error is thrown)."""
df_keys = df.keys()
drop = [k for k in cols if k in df_keys]
if inplace:
df.drop(drop, axis=1, inplace=True)
else:
result = df.drop(drop, axis=1)
return result
def well_from_position(df, well_name="Metadata_Well",
row_name="plateRow", col_name="plateColumn"):
"""Assign Metadata_Well from plateRow, plateColumn"""
def _well_from_position_series(s):
return cpt.well_from_position_single(s[0], s[1])
result = df.copy()
result[well_name] = result[[row_name, col_name]].apply(_well_from_position_series, axis=1)
return result
def position_from_well(df, well_name="Metadata_Well",
row_name="plateRow", col_name="plateColumn"):
"""Generate plateRow and plateColumn from Metatadata_Well"""
def _position_from_well_series(well):
return (pd.Series(cpt.position_from_well_single(well)))
result = df.copy()
result[[row_name, col_name]] = result[well_name].apply(_position_from_well_series)
return result
def join_layout_384(df, layout_fn, on="Address"):
result = df.copy()
result[on] = result["Metadata_Well"]
layout = pd.read_csv(layout_fn)
result = result.merge(layout, on=on)
result.drop(on, axis=1, inplace=True)
result = result.apply(pd.to_numeric, errors='ignore')
return result
def get_batch_from_container(df):
result = df.copy()
result["Batch_Id"] = result["Container_Id"].str[:9]
return result
def get_cpd_from_container(df):
result = pd.concat([df, df["Container_Id"].str.split(":", expand=True)], axis=1)
result.rename(columns={0: "Compound_Id"}, inplace=True)
drop_cols(result, [1, 2, 3, 4], inplace=True)
return result
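# Illustrative usage sketch (added for clarity; not part of the original module).
# Container_Id strings have the form "<Compound_Id>:<batch>:<container>"; the two
# helpers above derive Batch_Id and Compound_Id from them. The example Id is made up
# (it reuses the DMSO container format from add_dmso below).
def _example_container_helpers():
    demo = pd.DataFrame({"Container_Id": ["245754:01:01"]})
    demo = get_batch_from_container(demo)  # adds Batch_Id "245754:01"
    demo = get_cpd_from_container(demo)    # adds Compound_Id "245754"
    return demo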
def join_layout_1536(df, plate, quadrant, on="Address_384", sep="\t", how="inner"):
"""Cell Painting is always run in 384er plates.
COMAS standard screening plates are format 1536.
With this function, the 1536-to-384 reformatting file
can be used directly to join the layout to the individual 384er plates."""
load_resource("LAYOUTS")
layout = LAYOUTS.copy()
if not isinstance(quadrant, str):
quadrant = str(quadrant)
drop = ["Plate_name_384", "Plate_name_1536", "Address_1536", "Index", 1, 2]
result = df.copy()
layout[on] = layout["Plate_name_384"] + layout[on]
if "Container_ID_1536" in layout.keys():
layout.rename(columns={"Container_ID_1536": "Container_Id"}, inplace=True)
if "Conc" in layout.keys():
layout.rename(columns={"Conc": "Conc_uM"}, inplace=True)
layout = join_container(layout)
drop_cols(layout, drop, inplace=True)
result[on] = plate + "." + quadrant[-1:] + result["Metadata_Well"]
result = result.merge(layout, on=on, how=how)
result.drop(on, axis=1, inplace=True)
result["Well_Id"] = result["Container_Id"] + "_" + result["Metadata_Well"]
result = result.apply(pd.to_numeric, errors='ignore')
return result
def write_datastore():
df = DATASTORE[cprp.datastore_cols]
df = df.sort_values("Well_Id")
df.to_csv(cprp.datastore_path, index=False, sep="\t")
print_log(df, "write datastore")
def update_datastore(df2, on="Well_Id", mode="cpd", write=False):
global DATASTORE
load_resource("DATASTORE")
df1 = DATASTORE
df2 = df2.copy()
if "ref" in mode:
df2["Is_Ref"] = True
else:
df2["Is_Ref"] = False
df2 = df2[cprp.datastore_cols]
df1 = df1.append(df2, ignore_index=True)
rem = "" if write else "write is off"
print_log(df2, "update datastore", rem)
DATASTORE = df1.drop_duplicates(subset=on, keep="last")
if write:
write_datastore()
def join_batch_data(df, df_data=None, how="left", fillna="n.d."):
"""Join data from Batch_Id."""
if df_data is None:
load_resource("BATCH_DATA")
df_data = BATCH_DATA
if "Batch_Id" not in df.keys():
df = get_batch_from_container(df)
result = df.merge(df_data, on="Batch_Id", how=how)
result = result.apply(pd.to_numeric, errors='ignore')
result = result.fillna(fillna)
return result
def join_container_data(df, df_data=None, how="left", fillna=""):
"""Join data from Container_Id."""
if df_data is None:
load_resource("CONTAINER_DATA")
df_data = CONTAINER_DATA
result = df.merge(df_data, on="Container_Id", how=how)
result = result.apply(pd.to_numeric, errors='ignore')
result = result.fillna(fillna)
return result
def join_container(df, cont_data=None, how="inner"):
if cont_data is None:
load_resource("CONTAINER")
cont_data = CONTAINER[["Container_Id", "Compound_Id"]]
result = df.merge(cont_data, on="Container_Id", how=how)
return result
def join_smiles(df, df_smiles=None, how="left"):
"""Join Smiles from Compound_Id."""
if df_smiles is None:
load_resource("SMILES")
df_smiles = SMILES
result = df.merge(df_smiles, on="Compound_Id", how=how)
result = result.apply(pd.to_numeric, errors='ignore')
result = result.fillna("*")
return result
def join_annotations(df):
"""Join Annotations from Compound_Id."""
load_resource("ANNOTATIONS")
annotations = ANNOTATIONS
drop_cols(df, ["Trivial_Name", "Known_Act"], inplace=True)
result = df.merge(annotations, on="Compound_Id", how="left")
result = result.fillna("")
return result
def add_dmso(df):
if df[df["Compound_Id"] == 245754].shape[0] > 0:
# DMSO already present
result = df.copy()
else:
d = {
"Compound_Id": [245754], "Container_Id": ["245754:01:01"], "Well_Id": ["245754:01:01_H11"],
"Producer": ["DMSO"], "Conc_uM": [10], "Activity": [0.0], "Rel_Cell_Count": [100],
"Pure_Flag": ["Ok"], "Toxic": [False], "Trivial_Name": ["DMSO"], "Known_Act": ["Control"],
"Metadata_Well": ["H11"], "Plate": ["170523-S0195-1"], "Smiles": ["CS(C)=O"],
"Act_Profile": [len(ACT_PROF_PARAMETERS) * "1"]
}
dmso = pd.DataFrame(d)
result = pd.concat([df, dmso])
return result
def metadata(df):
"""Returns a list of the those parameters in the DataFrame that are NOT CellProfiler measurements."""
parameters = [k for k in df.keys()
if not (k.startswith("Count_") or k.startswith("Median_"))]
return parameters
def measurements(df):
"""Returns a list of the CellProfiler parameters that are in the DataFrame."""
parameters = [k for k in df.select_dtypes(include=[np.number]).keys()
if k.startswith("Count_") or k.startswith("Median_")]
return parameters
def numeric_parameters(df):
result = df.copy()[measurements(df)]
return result
def flag_toxic(df, cutoff=LIMIT_CELL_COUNT_L / 100):
"""Flag data rows of toxic compounds"""
result = df.copy()
median_cell_count_controls = df[df["WellType"] == "Control"]["Count_Cells"].median()
result["Toxic"] = (result["Count_Cells"] < median_cell_count_controls * cutoff)
result["Rel_Cell_Count"] = (100 * (result["Count_Cells"] / median_cell_count_controls)).astype(int)
return result
def remove_toxic(df, cutoff=LIMIT_CELL_COUNT_L / 100):
"""Remove data rows of toxic compounds"""
if "Toxic" not in df.keys():
flagged = flag_toxic(df, cutoff=cutoff)
else:
flagged = df.copy()
result = flagged[~flagged["Toxic"]]
toxic = flagged[flagged["Toxic"]]
return result, toxic
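# Illustrative sketch (added for clarity; not part of the original module).
# flag_toxic compares each well's cell count to the median cell count of the
# control wells; wells below `cutoff` times that median are flagged as toxic.
# The counts below are made up.
def _example_flag_and_remove_toxic():
    demo = pd.DataFrame({
        "WellType": ["Control", "Control", "Compound", "Compound"],
        "Count_Cells": [1000, 1200, 900, 300],
    })
    flagged = flag_toxic(demo, cutoff=0.55)   # control median is 1100; 300 < 605 -> Toxic
    ok, toxic = remove_toxic(flagged, cutoff=0.55)
    return ok, toxic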
def remove_skipped_echo_direct_transfer(df, fn):
"""Remove wells that were reported as skipped in the Echo protocol (xml).
This function works with Echo direct-transfer protocols.
Wildcards in the filename are supported; the first matching file will be used.
Returns a new dataframe without the skipped wells."""
assert fn.endswith(".xml"), "Echo file expected in XML format."
skipped_wells = []
try:
echo_fn = glob.glob(fn)[0] # use the first glob match
except IndexError:
raise FileNotFoundError("Echo file could not be found")
echo_print = ET.parse(echo_fn).getroot()
skipped = echo_print.find("skippedwells")
for well in skipped.findall("w"):
skipped_wells.append(cpt.format_well(well.get("dn")))
# print("Skipped wells (will be removed):", skipped_wells)
# remove the rows with the skipped wells
# i.e. keep the rows where Metadata_Well is not in the list skipped_wells
result = df[~df["Metadata_Well"].isin(skipped_wells)]
return result, skipped_wells
def remove_impure(df, strict=False, reset_index=True):
"""Remove entries with `Pure_Flag == "Fail"`
If `strict == True` compound with `Pure_Flag == Warn` are also removed."""
result = df.copy()
outliers_list = []
try:
outl = result[result["Pure_Flag"] == "Fail"]
except TypeError:
print(result["Pure_Flag"].dtype)
raise
result = result[result["Pure_Flag"] != "Fail"]
outliers_list.append(outl)
if strict:
outl = result[result["Pure_Flag"] == "Warn"]
result = result[result["Pure_Flag"] != "Warn"]
outliers_list.append(outl)
outliers = pd.concat(outliers_list)
if reset_index:
result = result.reset_index()
outliers = outliers.reset_index()
result.drop("index", axis=1, inplace=True)
outliers.drop("index", axis=1, inplace=True)
return result, outliers
def remove_outliers(df, times_dev=3.0, group_by=None, method="median", reset_index=True):
"""Returns the filtered dataframe as well as the outliers.
`method` can be `median` or `mean`."""
include = [k for k in FINAL_PARAMETERS if k in df.keys()]
input = df.copy()
# input = numeric_parameters(df)
if group_by is None:
group_by = "temp_group"
input[group_by] = "data"
gdata_list = []
outliers_list = []
for group in input[group_by].unique():
gdata = input[input[group_by] == group]
gdata = numeric_parameters(gdata)
if method == "median":
mask = (gdata - gdata.median()).abs() - times_dev * gdata.mad() <= 0
elif method == "mean":
mask = (gdata - gdata.mean()).abs() - times_dev * gdata.std() <= 0
else:
raise ValueError("Unknown method {}.".format(method))
good_data = gdata[(mask).all(axis=1)]
outl_data = gdata[(~(mask).all(axis=1))] # outliers
# print(group, ": ", good_data.shape, outl_data.shape)
gdata_list.append(good_data)
outliers_list.append(outl_data)
result = pd.concat(gdata_list)
outliers = pd.concat(outliers_list)
if group_by == "temp_group": # remove the grouping temp col again
result.drop(group_by, axis=1, inplace=True)
outliers.drop(group_by, axis=1, inplace=True)
for k in include:
result[k] = df[k]
outliers[k] = df[k]
if reset_index:
result = result.reset_index()
outliers = outliers.reset_index()
return result, outliers
def group_on_well(df, group_by=FINAL_PARAMETERS):
"""Group results on well level."""
group_by = list(set(group_by).intersection(set(df.keys())))
result = df.groupby(by=group_by).median().reset_index()
return result
def poc(df, group_by=None):
result = df.copy()
if group_by is None: # create a temp grouping column
group_by = "temp_group"
result[group_by] = "data"
plates = set(result[group_by])
for plate in plates:
print("Normalizing {}... ".format(plate), end="")
controls = result[(result[group_by] == plate) & (result["WellType"] == "Control")].select_dtypes(include=[np.number])
median = controls.median()
for col in controls.keys():
if col in FINAL_PARAMETERS: continue
result[col] = 100 * result[col] / median[col]
print("done.")
if group_by == "temp_group": # remove the grouping temp col again
result.drop(group_by, axis=1, inplace=True)
return result
def activity_profile(df, mad_mult=3.5, parameters=ACT_PROF_PARAMETERS, only_final=True):
"""Generates the `Act_Profile` column.
The byte is set when the parameter's value is greater (or smaller)
than parameter_ctrl.median() + (or -) `mad_mult`* parameter.mad()
If a list of parameters is given, then the activity profile will be calculated for these parameters.
If `only_final` == `True`, then only the parameters listed in `FINAL_PARAMETERS`
are kept in the output_table.
Returns a new Pandas DataFrame."""
decimals = {"Activity": 1}
result = df.copy()
if parameters is None: # choose all numeric parameters
act_parameters = measurements(df)
else:
act_parameters = parameters.copy()
assert len(act_parameters) > 0
# sort parameters alphabetically
act_parameters.sort()
controls = df[act_parameters][df["WellType"] == "Control"]
for key in act_parameters:
median = controls[key].median()
times_mad = mad_mult * controls[key].mad()
lower_bound = median - times_mad
upper_bound = median + times_mad
result.loc[df[key].between(lower_bound, upper_bound, inclusive=True), [key]] = 1
result.loc[df[key] < lower_bound, [key]] = 0
result.loc[df[key] > upper_bound, [key]] = 2
result[act_parameters] = result[act_parameters].astype(int)
result["Activity"] = 100 * (result[act_parameters] != 1).sum(axis=1) / len(act_parameters)
result["Act_Profile"] = result[act_parameters].astype(str).apply(lambda x: "".join(x), axis=1)
if only_final:
drop = []
for k in result.keys():
if k not in FINAL_PARAMETERS:
drop.append(k)
result.drop(drop, axis=1, inplace=True)
result = result.round(decimals)
return result
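# Illustrative sketch (added for clarity; not part of the original module).
# The Act_Profile encoding assigns one digit per parameter and well: "0" below,
# "1" within and "2" above the control median +/- mad_mult * MAD window.
# The column names and values below are made up.
def _example_activity_profile():
    demo = pd.DataFrame({
        "WellType": ["Control"] * 4 + ["Compound"],
        "Median_Param_A": [10.0, 10.5, 9.5, 10.0, 30.0],  # compound well is clearly high
        "Median_Param_B": [5.0, 5.2, 4.8, 5.0, 5.1],      # compound well is within the window
    })
    prof = activity_profile(demo, mad_mult=3.5,
                            parameters=["Median_Param_A", "Median_Param_B"],
                            only_final=False)
    # The compound well gets Act_Profile "21" and Activity 50.0 (1 of 2 parameters active).
    return prof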
def relevant_parameters(df, ctrls_std_rel_min=0.001,
ctrls_std_rel_max=0.1, group_by="Plate"):
"""...std_rel...: mad relative to the median value"""
relevant_table = FINAL_PARAMETERS.copy()
ctrl_set = set(df.keys())
plates = sorted(set(df[group_by]))
for plate in plates:
debug_print("Processing plate", plate)
controls = df[(df[group_by] == plate) & (df["WellType"] == "Control")].select_dtypes(include=[np.number])
median = controls.median()
std = controls.quantile(q=QUANT).std()
ds = std / median >= ctrls_std_rel_min
tmp_set = set([p for p in ds.keys() if ds[p]])
ctrl_set.intersection_update(tmp_set)
debug_print("ctrl_set", len(ctrl_set))
ds = std / median <= ctrls_std_rel_max
tmp_set = set([p for p in ds.keys() if ds[p]])
ctrl_set.intersection_update(tmp_set)
# debug_print("tmp_set", len(tmp_set))
debug_print("ctrl_set", len(ctrl_set))
relevant_table.extend(list(ctrl_set))
debug_print("relevant_table", len(relevant_table))
result_keys = list(df.keys())
keep = []
for key in result_keys:
if key in relevant_table:
keep.append(key)
result = df[keep]
debug_print("keep", len(keep))
return result
def correlation_filter(df, cutoff=0.9, method="pearson"):
assert method in ["pearson", "kendall", "spearman"], 'method has to be one of ["pearson", "kendall", "spearman"]'
df_copy = df.copy().select_dtypes(include=[np.number])
# init the list of the uncorrelated parameters, incl. some string param.
parameters_uncorr = [p for p in FINAL_PARAMETERS if p in df]
iteration = 0
while True:
cm = df_copy.corr(method=method).abs()
correlated = cm[cm > cutoff]
ds = correlated.count().sort_values(ascending=False)
if ds[0] == 1: break # no more correlations
iteration += 1
# from all columns with the same number of correlated columns,
# find the column with the highest value range
# and keep that preferably
num_correlated = ds[0] # number of correlated columns
rnge = 0.0
rnge_key = ""
for i in range(len(ds)):
if ds[i] < num_correlated: break # only compare columns with the same highest correlation
k = ds.keys()[i]
debug_print(" k", k)
r = df_copy[k].max() - df_copy[k].min()
if r > rnge:
rnge = r
rnge_key = k
keep_it = rnge_key
parameters_uncorr.append(keep_it)
debug_print("keep_it", keep_it)
debug_print("num_corr.", num_correlated)
# find the parameters actually correlated to `keep_it`
parameters_to_remove = list(correlated[keep_it][correlated[keep_it].notnull()].keys())
# `keep_it` itself appears in this list (self-correlation); it is dropped from df_copy below, but it has already been stored in parameters_uncorr.
debug_print("param_to_rem", parameters_to_remove)
# remove the correlated parameters:
df_copy.drop(parameters_to_remove, axis=1, inplace=True)
parameters_uncorr.extend(df_copy.keys())
parameters_uncorr = list(set(parameters_uncorr))
return df[parameters_uncorr], iteration
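# Illustrative sketch (added for clarity; not part of the original module) of the
# core idea behind correlation_filter: count, per column, how many columns correlate
# above the cutoff; a count of 1 means the column only correlates with itself.
# The data below is randomly generated.
def _example_correlation_counts(cutoff=0.9):
    rng = np.random.RandomState(0)
    a = rng.rand(50)
    demo = pd.DataFrame({"A": a, "B": 2.0 * a + 0.1, "C": rng.rand(50)})
    cm = demo.corr(method="pearson").abs()
    counts = cm[cm > cutoff].count().sort_values(ascending=False)
    # "A" and "B" each count 2 (self + the other); "C" counts 1.
    return counts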
def correlation_filter_std(df, cutoff=0.9, method="pearson"):
"""Reduce the parameter set to only uncorrelated parameters. From a set of correlated
parameters only the one with the lowest variance in the controls is kept,
all others are discarded."""
assert method in ["pearson", "kendall", "spearman"], 'method has to be one of ["pearson", "kendall", "spearman"]'
assert "WellType" in df.keys()
# init the list of the uncorrelated parameters, incl. some string param.
parameters_uncorr = [p for p in FINAL_PARAMETERS if p in df]
df_copy = df.copy()
controls = df_copy[df_copy["WellType"] == "Control"]
controls_rel_std = controls.quantile(q=QUANT).std() / controls.median()
df_copy = df_copy.select_dtypes(include=[np.number])
iteration = 0
while True:
cm = df_copy.corr(method=method)
correlated = cm[cm > cutoff]
ds = correlated.count().sort_values(ascending=False)
if ds[0] == 1: break # no more correlations
iteration += 1
equal_corr = ds[ds == ds[0]]
eq_keys = equal_corr.keys()
# from all columns with the same number of correlated columns,
# keep the one with the lowest relative standard deviation
# in the controls
keep_it = controls_rel_std[eq_keys].sort_values(ascending=True).keys()[0]
parameters_uncorr.append(keep_it)
debug_print("keep_it", keep_it)
debug_print("num_corr.", ds[0])
# find the parameters actually correlated to `keep_it`
parameters_to_remove = list(correlated[keep_it][correlated[keep_it].notnull()].keys())
debug_print("param_to_rem", parameters_to_remove)
# remove the correlated parameters:
df_copy.drop(parameters_to_remove, axis=1, inplace=True)
parameters_uncorr.extend(df_copy.keys())
parameters_uncorr = list(set(parameters_uncorr))
# print("It took {} iterations to remove all correlated parameters.".format(iteration - 1))
return df[parameters_uncorr], iteration
def correlation_filter_std_old(df, cutoff=0.9, method="pearson"):
"""Reduce the parameter set to only uncorrelated parameters. From a set of correlated
parameters only the one with the lowest variance in the controls is kept,
all others are discarded."""
assert method in ["pearson", "kendall", "spearman"], 'method has to be one of ["pearson", "kendall", "spearman"]'
assert "WellType" in df.keys()
# init the list of the uncorrelated parameters, incl. some string param.
parameters_uncorr = [p for p in FINAL_PARAMETERS if p in df]
df_copy = df.copy()
controls = df_copy[df_copy["WellType"] == "Control"]
controls_median = controls.median()
df_copy = df_copy.select_dtypes(include=[np.number])
iteration = 0
while True:
cm = df_copy.corr(method=method)
correlated = cm[cm > cutoff]
ds = correlated.count().sort_values(ascending=False)
if ds[0] == 1: break # no more correlations
iteration += 1
equal_corr = ds[ds == ds[0]]
eq_keys = equal_corr.keys()
# from all columns with the same number of correlated columns,
# find the column with the highest POC range
# and keep that preferably
poc = (df_copy[eq_keys] / controls_median[eq_keys])
keep_it = (poc.max() - poc.min()).sort_values(ascending=False).keys()[0]
parameters_uncorr.append(keep_it)
debug_print("keep_it", keep_it)
debug_print("num_corr.", ds[0])
# find the parameters actually correlated to `keep_it`
parameters_to_remove = list(correlated[keep_it][correlated[keep_it].notnull()].keys())
debug_print("param_to_rem", parameters_to_remove)
# remove the correlated parameters:
df_copy.drop(parameters_to_remove, axis=1, inplace=True)
parameters_uncorr.extend(df_copy.keys())
parameters_uncorr = list(set(parameters_uncorr))
# print("It took {} iterations to remove all correlated parameters.".format(iteration - 1))
return df[parameters_uncorr], iteration
def id_filter(df, cpd_ids, id_col="Compound_Id", reset_index=True, sort_by_input=False):
if not isinstance(cpd_ids, list):
cpd_ids = [cpd_ids]
result = df[df[id_col].isin(cpd_ids)]
if reset_index:
result.reset_index(inplace=True)
result.drop("index", axis=1, inplace=True)
if sort_by_input:
result["_sort"] = pd.Categorical(result[id_col], categories=cpd_ids, ordered=True)
result = result.sort_values("_sort")
result.drop("_sort", axis=1, inplace=False)
print_log(result, "id filter")
return result
def find_similar(df, act_profile, cutoff=0.5, max_num=5,
ignore_direction=False, mask=""):
"""Filter the dataframe for activity profiles similar to the given one.
`cutoff` gives the similarity threshold, default is 0.5."""
decimals = {"Similarity": 3}
result = df.copy()
result["Similarity"] = result["Act_Profile"].apply(lambda x: cpt.profile_sim(
x,
act_profile,
ignore_direction=ignore_direction,
mask=mask
))
result = result[result["Similarity"] >= cutoff]
result.drop("Act_Profile", axis=1, inplace=True)
result = result.sort_values("Similarity", ascending=False).head(max_num)
result = result.round(decimals)
return result
def write_obj(obj, fn):
"""Save a generic python object through pickling."""
with open(fn, "wb") as f:
pickle.dump(obj, f)
def write_sim_refs(mode="cpd"):
"""Export of sim_refs as pkl and as tsv for PPilot"""
global SIM_REFS
keep = ["Compound_Id", "Well_Id", "Is_Ref", "Ref_Id", "RefCpd_Id",
"Similarity", "Tanimoto", "Times_Found"]
if "ext" in mode.lower():
sim_fn = cprp.sim_refs_ext_path
else:
sim_fn = cprp.sim_refs_path
sim_fn_pp = op.splitext(sim_fn)[0] + "_pp.tsv"
SIM_REFS = SIM_REFS[keep]
sim_refs = SIM_REFS
sim_refs.to_csv(sim_fn, sep="\t", index=False) # the resource should be loaded at this point
df = sim_refs.sort_values("Similarity", ascending=False)
df = df.drop_duplicates(subset="Well_Id", keep="first")
df = df.rename(columns={"Similarity": "Highest_Sim"})
df.to_csv(sim_fn_pp, sep="\t", index=False) # tsv for PPilot
print("* {:22s} ({:5d} | -- )".format("write sim_refs", len(sim_refs)))
def load_obj(fn):
with open(fn, "rb") as f:
obj = pickle.load(f)
return obj
def mol_from_smiles(smi):
if not isinstance(smi, str):
smi = "*"
mol = Chem.MolFromSmiles(smi)
if not mol:
mol = Chem.MolFromSmiles("*")
return mol
def update_similar_refs(df, mode="cpd", write=True):
"""Find similar compounds in references and update the export file.
The export file of the DataFrame object is in tsv format. In addition,
another tsv file (or maybe JSON?) is written for use in PPilot.
`mode` can be "cpd" or "ref". if `sim_refs`is not None,
it has to be a dict of the correct format.
With `write=False`, the writing of the file can be deferred to the end of the processing pipeline,
but has to be done manually, then, with `write_sim_refs()`."""
def _chem_sim(mol_fp, query_smi):
query = mol_from_smiles(query_smi)
if len(query.GetAtoms()) > 1:
query_fp = Chem.GetMorganFingerprint(query, 2) # ECFC4
return round(DataStructs.TanimotoSimilarity(mol_fp, query_fp), 3)
return np.nan
global SIM_REFS
mode = mode.lower()
load_resource("REFERENCES")
load_resource("SIM_REFS", mode=mode)
df_refs = REFERENCES
sim_refs = SIM_REFS
for _, rec in df.iterrows():
if rec["Activity"] < LIMIT_ACTIVITY_L or rec["Toxic"]:
# no similarites for low active or toxic compounds
continue
act_profile = rec["Act_Profile"]
max_num = 5
if "ref" in mode:
max_num += 1
similar = find_similar(df_refs, act_profile, cutoff=LIMIT_SIMILARITY_L / 100, max_num=max_num)
if "ref" in mode:
similar.drop(similar.head(1).index, inplace=True)
if len(similar) > 0:
similar = similar[["Well_Id", "Compound_Id", "Similarity", "Smiles"]]
similar = similar.rename(columns={"Well_Id": "Ref_Id", "Compound_Id": "RefCpd_Id"})
similar["Well_Id"] = rec["Well_Id"]
if "ref" in mode:
similar["Is_Ref"] = True
else:
similar["Is_Ref"] = False
similar["Compound_Id"] = rec["Compound_Id"]
mol = mol_from_smiles(rec.get("Smiles", "*"))
if len(mol.GetAtoms()) > 1:
mol_fp = Chem.GetMorganFingerprint(mol, 2) # ECFC4
similar["Tanimoto"] = similar["Smiles"].apply(lambda q: _chem_sim(mol_fp, q))
else:
similar["Tanimoto"] = np.nan
sim_refs = sim_refs.append(similar, ignore_index=True)
sim_refs = sim_refs.drop_duplicates(subset=["Well_Id", "Ref_Id"], keep="last")
# Assign the number of times a reference was found by a research compound
drop_cols(sim_refs, ["Times_Found"], inplace=True)
tmp = sim_refs.copy()
tmp = tmp[~tmp["Is_Ref"]]
tmp = tmp.groupby(by="Ref_Id").count().reset_index()
# "Compound_Id" is just one field that contains the correct count:
tmp = tmp[["Ref_Id", "Compound_Id"]]
tmp = tmp.rename(columns={"Compound_Id": "Times_Found"})
sim_refs = pd.merge(sim_refs, tmp, on="Ref_Id", how="left")
sim_refs = sim_refs.fillna(0)
sim_refs["Times_Found"] = sim_refs["Times_Found"].astype(int)
SIM_REFS = sim_refs
if write:
# with write=False, the writing can be deferred to the end of the processing pipeline,
# but has to be done manually, then.
write_sim_refs()
def well_id_similarity(df1, well_id1, df2, well_id2):
"""Calculate the similarity of the activity profiles from two compounds
(identified by `Well_Id`). Returns value between 0 .. 1"""
act1 = df1[df1["Well_Id"] == well_id1]["Act_Profile"].values[0]
act2 = df2[df2["Well_Id"] == well_id2]["Act_Profile"].values[0]
return round(cpt.profile_sim(act1, act2), 3)
def count_active_parameters_occurrences(df, act_prof="Act_Profile", parameters=ACT_PROF_PARAMETERS):
"""Counts the number of times each parameter has been active in the dataset."""
ctr_int = Counter()
ctr_str = {}
for _, rec in df.iterrows():
for idx, b in enumerate(rec[act_prof]):
if b != "1":
ctr_int[idx] += 1
for k, val in ctr_int.items():
ctr_str[parameters[k]] = val
return ctr_str
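# Illustrative sketch (added for clarity; not part of the original module).
# Every digit != "1" in an Act_Profile marks an active parameter; the counter
# reports, per parameter name, how often it was active. Profiles and names are made up.
def _example_count_active_parameters():
    demo = pd.DataFrame({"Act_Profile": ["210", "211", "111"]})
    params = ["Param_A", "Param_B", "Param_C"]
    # expected result: {"Param_A": 2, "Param_C": 1}
    return count_active_parameters_occurrences(demo, parameters=params)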
def heat_mpl(df, id_prop="Compound_Id"):
plt.close()
df_len = len(df)
plt.style.use("seaborn-white")
plt.style.use("seaborn-pastel")
plt.style.use("seaborn-talk")
plt.rcParams['axes.labelsize'] = 25
# plt.rcParams['legend.fontsize'] = 20
plt.rcParams['figure.figsize'] = (12, 1.1 + 0.47 * (df_len - 1))
plt.rcParams['axes.labelsize'] = 25
plt.rcParams['ytick.labelsize'] = 20
plt.rcParams['xtick.labelsize'] = 15
fs_text = 18
y_labels = []
fp_list = []
for _, rec in df.iterrows():
y_labels.append(rec[id_prop])
fp = cpt.prof_to_list(rec["Act_Profile"])
fp_list.append(fp)
# invert y axis:
y_labels = y_labels[::-1]
fp_list = fp_list[::-1]
Z = np.asarray(fp_list)
plt.xticks([100, 186, 307])
plt.yticks(np.arange(df_len) + 0.5, y_labels)
plt.pcolor(Z, cmap="bwr")
plt.text(50, -1.1, "Cells", horizontalalignment='center', fontsize=fs_text)
plt.text(143, -1.1, "Cytoplasm", horizontalalignment='center', fontsize=fs_text)
plt.text(246, -1.1, "Nuclei", horizontalalignment='center', fontsize=fs_text)
plt.tight_layout()
plt.show()
def heat_hv(df, id_prop="Compound_Id"):
df_len = len(df)
hm_opts = dict(width=870, height=40 + 25 * df_len, tools=['hover'], invert_yaxis=False,
xrotation=90, labelled=[], toolbar='above', xaxis=None)
hm_style = {"cmap": "bwr"}
opts = {'HeatMap': {'plot': hm_opts, "style": hm_style}}
df_heat = cpt.melt(df, id_prop=id_prop)
heatmap = hv.HeatMap(df_heat)
return heatmap(opts)
|
mit
|
itoledoc/gWTO2
|
wtoAlgorithm.py
|
1
|
40183
|
from datetime import datetime
from datetime import timedelta
import numpy as np
import pandas as pd
import ephem
from lxml import objectify
from wtoDatabase import WtoDatabase
import ruvTest as rUV
from scipy.stats import rayleigh
"""
wtoAlgorithm.py: the gWTO selector and scorer library.
======================================================
This library contains the classes and functions required to select and rank
SBs from the information that is stored in a WtoDatabase object.
"""
__author__ = 'itoledo'
confDf = pd.DataFrame(
[('C34-1', 3.73, 2.49, 1.62, 1.08, 0.81, 0.57)],
columns=['Conf', 'ALMA_RB_03', 'ALMA_RB_04', 'ALMA_RB_06', 'ALMA_RB_07',
'ALMA_RB_08', 'ALMA_RB_09'],
index=['C34-1'])
confDf.ix['C34-2'] = ('C34-2', 2.04, 1.36, 0.89, 0.59, 0.44, 0.31)
confDf.ix['C34-3'] = ('C34-3', 1.4, 0.93, 0.61, 0.4, 0.3, 0.21)
confDf.ix['C34-4'] = ('C34-4', 1.11, 0.74, 0.48, 0.32, 0.24, 0.17)
confDf.ix['C34-5'] = ('C34-5', 0.75, 0.50, 0.33, 0.22, 0.16, 0.12)
confDf.ix['C34-6'] = ('C34-6', 0.57, 0.38, 0.25, 0.16, 0.12, 0.09)
confDf.ix['C34-7'] = ('C34-7', 0.41, 0.27, 0.18, 0.12, None, None)
pwn = [0., 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35,
0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75,
0.8, 0.85, 0.9, 0.95, 1., 1.05, 1.1, 1.15,
1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55,
1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95,
2., 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35,
2.4, 2.45, 2.5, 2.55, 2.6, 2.65, 2.7, 2.75,
2.8, 2.85, 2.9, 2.95, 3., 3.05, 3.1, 3.15,
3.2, 3.25, 3.3, 3.35, 3.4, 3.45, 3.5, 3.55,
3.6, 3.65, 3.7, 3.75, 3.8, 3.85, 3.9, 3.95,
4., 4.05, 4.1, 4.15, 4.2, 4.25, 4.3, 4.35,
4.4, 4.45, 4.5, 4.55, 4.6, 4.65, 4.7, 4.75,
4.8, 4.85, 4.9, 4.95, 5., 5.05, 5.1, 5.15,
5.2, 5.25, 5.3, 5.35, 5.4, 5.45, 5.5, 5.55,
5.6, 5.65, 5.7, 5.75, 5.8, 5.85, 5.9, 5.95,
6., 6.2, 6.4, 6.6, 6.8, 7., 7.2, 7.4,
7.6, 7.8, 8., 8.2, 8.4, 8.6, 8.8, 9.,
9.2, 9.4, 9.6, 9.8, 10., 10.2, 10.4, 10.6,
10.8, 11., 11.2, 11.4, 11.6, 11.8, 12., 12.2,
12.4, 12.6, 12.8, 13., 13.2, 13.4, 13.6, 13.8,
14., 14.2, 14.4, 14.6, 14.8, 15., 15.2, 15.4,
15.6, 15.8, 16., 16.2, 16.4, 16.6, 16.8, 17.,
17.2, 17.4, 17.6, 17.8, 18., 18.2, 18.4, 18.6,
18.8, 19., 19.2, 19.4, 19.6, 19.8, 20.]
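# Note added for clarity (not part of the original module): `pwn` is the grid of
# PWV values (in mm) from which set_pwv() below picks; a measured PWV is snapped
# to the nearest grid point, e.g.
#     min(pwn, key=lambda x: abs(x - 1.23))   # -> 1.25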
pd.options.display.width = 200
pd.options.display.max_columns = 55
SSO = ['Moon', 'Sun', 'Mercury', 'Venus', 'Mars', 'Jupiter', 'Saturn',
'Uranus', 'Neptune', 'Pluto']
MOON = ['Ganymede', 'Europa', 'Callisto', 'Io', 'Titan']
alma1 = ephem.Observer()
alma1.lat = '-23.0262015'
alma1.long = '-67.7551257'
alma1.elev = 5060
h = 6.6260755e-27
k = 1.380658e-16
c = 2.99792458e10
c_mks = 2.99792458e8
# J = hvkT / (np.exp(hvkT) - 1)
# TebbSky = TebbSkyZenith*(1-np.exp(-airmass*(tau)))/(1-np.exp(-tau))
# TebbSky_Planck = TebbSky*J
# tsys = (1+g)*(t_rx + t_sky*0.95 + 0.05*270) / (0.95 * np.exp(-tau*airmass))
# noinspection PyUnresolvedReferences,PyTypeChecker
class WtoAlgorithm(WtoDatabase):
"""
Inherits from WtoDatabase, adds the methods for selection and scoring.
It also sets the default parameters for these methods: pwv=1.2, date=now,
array angular resolution, transmission=0.5, minha=-5, maxha=3, etc.
:param path: A path, relative to $HOME, where the cache is stored.
:type path: (default='/.wto/') String.
:param source: See WtoDatabase definitions.
:param forcenew: See WtoDatabase definitions.
:return: A WtoAlgorithm instance.
"""
def __init__(self, path='/.wto/', source=None, forcenew=False):
super(WtoAlgorithm, self).__init__(path, source, forcenew)
self.pwv = 1.2
self.date = ephem.now()
self.old_date = 0
self.array_ar = 0.94
self.transmission = 0.5
self.minha = -5.0
self.maxha = 3.0
self.minfrac = 0.75
self.maxfrac = 1.2
self.horizon = 20.
self.maxblfrac = 1.5
self.exec_prio = {'EA': 10., 'NA': 10., 'EU': 10., 'CL': 10.,
'OTHER': 10.}
self.num_ant_user = 34
self.defarrays = ['C34-1', 'C34-2', 'C34-3', 'C34-4', 'C34-5', 'C34-6',
'C34-7']
self.arr_ar_def = {'C34-1': 3.73, 'C34-2': 2.04, 'C34-3': 1.4,
'C34-4': 1.11, 'C34-5': 0.75, 'C34-6': 0.57,
'C34-7': 0.41}
self.array_name = None
self.not_horizon = False
self.tau = pd.read_csv(
self.wto_path + 'conf/tau.csv', sep=',', header=0).set_index('freq')
self.tsky = pd.read_csv(
self.wto_path + 'conf/tskyR.csv', sep=',', header=0).set_index(
'freq')
self.pwvdata = pd.read_pickle(
self.wto_path + 'conf/' + self.preferences.pwv_data).set_index(
'freq')
self.pwvdata.index = pd.Float64Index(
pd.np.round(self.pwvdata.index, decimals=1), dtype='float64')
self.alma = alma1
self.reciever = pd.DataFrame(
[55., 45., 75., 110., 51., 150.],
columns=['trx'],
index=['ALMA_RB_06', 'ALMA_RB_03', 'ALMA_RB_07', 'ALMA_RB_09',
'ALMA_RB_04', 'ALMA_RB_08'])
self.reciever['g'] = [0., 0., 0., 1., 0., 0.]
io_file = open(self.wto_path + 'conf/ArrayConfiguration.xml')
tree = objectify.parse(io_file)
antfile = tree.getroot()
io_file.close()
self.antpad = pd.DataFrame(columns=['pad', 'antenna'])
for n in range(len(antfile.AntennaOnPad)):
p = antfile.AntennaOnPad[n].attrib['pad']
a = antfile.AntennaOnPad[n].attrib['antenna']
self.antpad.loc[n] = (p, a)
self.query_arrays()
# noinspection PyAugmentAssignment
def selector(self, array):
"""
Selects SBs that can be observed given the current weather conditions,
HA range, array type and array configuration (in the case of 12m array
type) and SB/Project Status. See
:ref:`Selection and Data preparation <selection>`
:param array: '12m', '7m', 'tp'
:type array: String.
:return: Depending on the array type, creates tables select12m, select7m
or selecttp.
"""
# TODO: add a 5% padding to fraction selection.
# TODO: check with Jorge Garcia the rms fraction against reality.
self.check_observability(array)
if array not in ['12m', '7m', 'tp']:
print("Use 12m, 7m or tp for array selection.")
return None
else:
if array == '12m':
array1 = ['TWELVE-M']
elif array == '7m':
array1 = ['SEVEN-M', 'ACA']
else:
array1 = ['TP-Array']
sel = self.sb_summary.copy()
if array == '7m':
sel = sel[
(sel.array == array1[0]) |
(sel.array == array1[1])]
else:
sel = sel[sel.array == array1[0]]
print("SBs for %s array: %d" % (array, len(sel)))
pwvcol = self.pwvdata[[str(self.pwv)]]
len_bf_cond = len(sel)
sel = pd.merge(
sel, pwvcol, left_on='repfreq', right_index=True)
sel.rename(columns={str(self.pwv): 'transmission'}, inplace=True)
ind1 = sel.repfreq
ind2 = pd.np.around(sel.maxPWVC, decimals=1).astype(str)
sel['tau_org'] = self.tau.lookup(ind1, ind2)
sel['tsky_org'] = self.tsky.lookup(ind1, ind2)
sel['airmass'] = 1 / pd.np.cos(pd.np.radians(-23.0262015 - sel.DEC))
sel = pd.merge(sel, self.reciever, left_on='band', right_index=True,
how='left')
tskycol = self.tsky[[str(self.pwv)]]
sel = pd.merge(sel, tskycol, left_on='repfreq', right_index=True)
taucol = self.tau[[str(self.pwv)]]
sel.rename(columns={str(self.pwv): 'tsky'}, inplace=True)
sel = pd.merge(sel, taucol, left_on='repfreq', right_index=True)
sel.rename(columns={str(self.pwv): 'tau'}, inplace=True)
print("SBs in sb_summary: %d. SBs merged with tau/tsky info: %d." %
(len_bf_cond, len(sel)))
sel['sel_array'] = False
# Patch for hybrid configuration, C34-6 & C34-7
if array == '12m':
self.ruv.sort()
ruv6 = self.ruv[self.ruv < 1091.].copy()
# x = np.linspace(0, ruv6.max() + 100., 1000)
param = rayleigh.fit(ruv6)
maxl6 = np.min([ruv6.max(), rayleigh.interval(0.992, loc=param[0],
scale=param[1])[1]])
self.ruv6 = ruv6.copy()
self.res6 = 61800 / (100. * maxl6)
self.blnum6 = len(ruv6)
if self.blnum6 < 591:
self.ruv6 = self.ruv.copy()
self.res6 = 0.571
print self.blnum6, self.res6
if array == '12m':
sel.loc[
((sel.arrayMinAR <= self.array_ar) &
(sel.arrayMaxAR >= self.array_ar)) |
((sel.arrayMinAR <= self.res6) &
(sel.arrayMaxAR >= self.res6)) |
((sel.arrayMinAR >= self.array_ar) &
(sel.arrayMaxAR <= self.res6)), 'sel_array'] = True
print("SBs for current 12m Array AR: %d. "
"(AR=%.2f, #bl=%d, #ant=%d)" %
(len(sel.query('sel_array == True')), self.array_ar,
self.num_bl, self.num_ant))
sel['blmax'] = sel.apply(
lambda row: rUV.computeBL(row['AR'], row['repfreq']), axis=1)
sel['blmin'] = sel.apply(
lambda row: rUV.computeBL(row['LAS'], row['repfreq']), axis=1)
if self.array_name is not None:
sel['blfrac'] = sel.apply(
lambda row: (33. * 17) / (1. * len(
self.ruv[(self.ruv <= row['blmax'])]))
if (row['isPointSource'] == False)
else (33. * 17) / (self.num_ant * (self.num_ant - 1) / 2.),
axis=1)
else:
sel['blfrac'] = sel.apply(
lambda row: (33. * 17) / (1. * len(
self.ruv[self.ruv < row['blmax']]))
if (row['isPointSource'] == False)
else (33. * 17) /
(34. * (34. - 1) / 2.),
axis=1)
if self.num_ant != 34:
sel.loc[:, 'blfrac'] = sel.loc[:, 'blfrac'] * (
33 * 17 / (self.num_ant * (
self.num_ant - 1) / 2.))
sel.loc[:, 'blfrac'] = sel.apply(
lambda row: ret_cycle(row[u'CODE'], row['blfrac']), axis=1
)
elif array == '7m':
sel['sel_array'] = True
sel['blfrac'] = 1.
if self.num_ant != 9:
sel.loc[:, 'blfrac'] = sel.loc[:, 'blfrac'] * (
9 * 4 / (self.num_ant * (
self.num_ant - 1) / 2.))
else:
sel['sel_array'] = True
sel['blfrac'] = 1.
sel['tsys'] = (
1 + sel['g']) * \
(sel['trx'] + sel['tsky'] *
((1 - pd.np.exp(-1 * sel['airmass'] * sel['tau'])) /
(1 - pd.np.exp(-1. * sel['tau']))) * 0.95 + 0.05 * 270.) / \
(0.95 * pd.np.exp(-1 * sel['tau'] * sel['airmass']))
sel['tsys_org'] = (
1 + sel['g']) * \
(sel['trx'] + sel['tsky_org'] *
((1 - pd.np.exp(-1 * sel['airmass'] * sel['tau_org'])) /
(1 - pd.np.exp(-1. * sel['tau_org']))) * 0.95 + 0.05 * 270.) / \
(0.95 * pd.np.exp(-1 * sel['tau_org'] * sel['airmass']))
sel['sel_trans'] = False
sel.loc[(sel.transmission > self.transmission) |
(sel.maxPWVC >= self.pwv),
'sel_trans'] = True
print("SBs with a transmission higher than %2.1f: %d" %
(self.transmission,
len(sel.query('sel_array == True and sel_trans == True'))))
self.alma.date = self.date
lst = pd.np.degrees(self.alma.sidereal_time())
ha = (lst - sel.RA) / 15.
ha.loc[ha > 12] = ha.loc[ha > 12] - 24.
ha.loc[ha < -12] = 24. + ha.loc[ha < -12]
sel['HA'] = ha
sel['sel_ha'] = False
sel.loc[
((sel.HA > self.minha) & (sel.HA < self.maxha)) |
(sel.RA == 0.), 'sel_ha'] = True
s3 = len(sel.query('sel_array == True and sel_trans == True and'
' sel_ha == True'))
print("SBs within current HA limits (or RA=0): %d" % s3)
sel['tsysfrac'] = (sel.tsys / sel.tsys_org) ** 2.
sel = pd.merge(sel, self.obser_prop, left_on='SB_UID',
right_index=True)
sel['sel_el'] = False
if self.not_horizon is False:
sel.loc[(sel.up == 1) & (sel.etime > 1.5), 'sel_el'] = True
s4 = len(
sel.query('sel_array == True and sel_trans == True and'
' sel_ha == True and sel_el == True'))
print("SBs over %d degrees, 1.5 hours: %d" %
(self.horizon, s4))
sel['sel_st'] = False
sel.loc[(sel.SB_state != "Phase2Submitted") &
(sel.SB_state != "FullyObserved") &
(sel.SB_state != "Deleted") &
(sel.SB_state != "Canceled") &
(sel.PRJ_state != "Phase2Submitted") &
(sel.PRJ_state != "Completed"), 'sel_st'] = True
sel.loc[
(sel.name.str.contains('not', case=False) == True),
'sel_st'] = False
s5 = len(
sel.query(
'sel_array == True and sel_trans == True and sel_ha == True '
'and sel_el == True and sel_st == True'))
print("SBs with Ok state: %d" % s5)
sel['sel_exe'] = False
sel.loc[sel.execount > sel.Total, 'sel_exe'] = True
s6 = len(
sel.query(
'sel_array == True and sel_trans == True and sel_ha == True '
'and sel_el == True and sel_st == True and sel_exe == True'))
print("SBs with missing exec: %d" % s6)
sel['frac'] = sel.tsysfrac * sel.blfrac
fg = self.fieldsource.query(
'isQuery == False and name == "Primary:"'
).groupby('SB_UID')
p = pd.DataFrame(
[fg.pointings.mean(), fg.pointings.count()],
index=['mpointings', 'sources']).transpose()
sel = pd.merge(sel, p, left_on='SB_UID', right_index=True, how='left')
if array == '12m':
self.select12m = sel.query(
'sel_array == True and sel_trans == True and sel_ha == True '
'and sel_el == True and sel_st == True and sel_exe == True and '
'tsysfrac < 2.1')
# print sel.query(
# 'sel_array == True and sel_trans == True and sel_ha == True '
# 'and sel_el == True and sel_st == True and sel_exe == True and '
# 'frac >= 2.1')
self.all12m = sel
print("SBs with 'frac' < 2.1: %d" % len(self.select12m))
elif array == '7m':
self.select7m = sel.query(
'sel_array == True and sel_trans == True and sel_ha == True '
'and sel_el == True and sel_st == True and sel_exe == True and '
'frac < 2.1')
self.all7m = sel
print("SBs with 'frac' < 2.1: %d" % len(self.select7m))
else:
self.selecttp = sel.query(
'sel_array == True and sel_trans == True and sel_ha == True '
'and sel_el == True and sel_st == True and sel_exe == True and '
'frac < 2.1')
self.alltp = sel
print("SBs with 'frac' < 2.1: %d" % len(self.selecttp))
def scorer(self, array):
"""
Method that handles the score calculation for each array type. It
applies ``self.calculate_score()`` to the previously selected SBs using
``self.selector()``
:param array: '12m', '7m', 'tp'
:type array: String.
:return: Creates a score table for the instance, which will be named as
``score12m``, ``score7m`` or ``scoretp``.
"""
if array == '12m':
try:
df = self.select12m
except AttributeError:
print("Please execute method self.selector('12m') first.")
return None
elif array == '7m':
try:
df = self.select7m
except AttributeError:
print("Please execute method self.selector('7m') first.")
return None
elif array == 'tp':
try:
df = self.selecttp
except AttributeError:
print("Please execute method self.selector('tp') first.")
return None
else:
print("array must be either 12m, 7m or tp.")
return None
self.max_scirank = df.scienceRank.max()
if len(df) > 0:
scores = df.apply(
lambda r: self.calculate_score(
r['execount'], r['Total'], r['scienceRank'], r['AR'],
r['arrayMinAR'], r['arrayMaxAR'], r['LAS'],
r['grade'], r['repfreq'], r['DEC'], r['EXEC'], array,
r['frac'], r['maxPWVC'], r['CODE'], r['name'], r['HA']),
axis=1)
scores = pd.DataFrame(scores.values.tolist(), index=scores.index)
scores.columns = pd.Index(
[u'sb_cond_score', u'sb_array_score', u'sb_completion_score',
u'sb_ha_scorer',
u'sb_exec_score', u'sb_science_score', u'sb_grade_score',
u'arcorr', u'score', u'lascorr'])
else:
scores = pd.DataFrame(
columns=pd.Index(
[u'sb_cond_score', u'sb_array_score',
u'sb_completion_score', u'sb_ha_scorer', u'sb_exec_score',
u'sb_science_score', u'sb_grade_score', u'arcorr',
u'score', u'lascorr']))
if array == '12m':
self.score12m = pd.merge(
self.select12m, scores, left_on='SB_UID', right_index=True)
elif array == '7m':
self.score7m = pd.merge(
self.select7m, scores, left_on='SB_UID', right_index=True)
else:
self.scoretp = pd.merge(
self.selecttp, scores, left_on='SB_UID', right_index=True)
# noinspection PyUnboundLocalVariable
def calculate_score(self, ecount, tcount, srank, ar, aminar, amaxar,
las, grade, repfreq, dec, execu, array,
frac, maxpwvc, code, name, ha):
"""
Please go to the :ref:`Score and ranking <score>` section for an
algorithm's description.
:param ecount: Executions requested by the SB
:type ecount: Integer.
:param tcount: Total executions with QA0 Pass or Unset for the SB.
:type tcount: Integer.
:param srank: SB Science ranking.
:type srank: Integer.
:param ar: SB requested Angular Resolution, from the Science Goal.
:type ar: Float.
:param aminar: The minimum angular resolution the SB can accept to be
observed.
:type aminar: Float. In arcsec.
:param amaxar: The maximum angular resolution the SB can accept to be
observed.
:type amaxar: Float. In arcsec.
:param las: SB requested Largest Angular Scale, from the Science Goal.
:type las: Float. In arcsec.
:param grade: SB's Project letter grade.
:type grade: String, can be A, B or C.
:param repfreq: SB's representative frequency.
:type repfreq: Float, in GHz.
:param dec: SB's representative declination coordinates, as determined
by self.check_observability().
:type dec: Float, in degrees.
:param execu: SB's Project Executive.
:type execu: String, can be NA, EU, EA, CL or OTHER.
:param array: Array type.
:type array: String, can be 12m, 7m or tp.
:param frac: Total time fraction calculated by self.selector() for the
SB to reach the required sensitivity.
:type frac: Float.
:param maxpwvc: SB's maxPWVC variable.
:type maxpwvc: Float, a value between 0 and 20.
:param code: SB's project code.
:type code: String.
:return: Tuple with (sb_cond_score, sb_array_score, sb_completion_score,
sb_ha_scorer, sb_exec_score, sb_science_score, sb_grade_score,
arcorr, score, lascorr).
"""
sb_completion = tcount / ecount
sb_completion_score = 6. * sb_completion + 4.
# set sb priority score
if grade == 'A' and str(code).startswith('2013'):
sb_grade_score = 10.
elif str(code).startswith('2012'):
sb_grade_score = 8.
elif grade == 'B':
sb_grade_score = 4.
else:
sb_grade_score = -100.
# set science score
sb_science_score = 10. * (self.max_scirank - srank) / self.max_scirank
# set array score
if array == '7m' or array == 'tp':
sb_array_score = 10.
arcorr_or = 0.
lascorr = 0.
else:
c_bmax = 0.4001 / pd.np.cos(pd.np.radians(-23.0262015) -
pd.np.radians(dec)) + 0.6103
c_freq = repfreq / 100.
corr = c_freq / c_bmax
arcorr = ar * corr
arcorr_or = arcorr
lascorr = las * corr
if name.endswith('_TC'):
arcorr = 0.9 * amaxar
arcorr_or = arcorr
if arcorr > 3.73:
arcorr = 3.73
if arcorr < 0.41:
arcorr = 0.41
if aminar > self.array_ar and amaxar >= self.res6:
array_ar = self.res6
elif aminar > self.array_ar and amaxar < self.res6:
array_ar = (self.res6 + self.array_ar) / 2.
else:
array_ar = self.array_ar
if 0.9 * arcorr <= array_ar <= 1.1 * arcorr:
sb_array_score = 10.
elif 0.8 * arcorr < array_ar <= 1.2 * arcorr:
sb_array_score = 8.0
elif array_ar < 0.8 * arcorr: # and not points:
l = 0.8 * arcorr - aminar
sb_array_score = ((array_ar - aminar) / l) * 8.0
# elif self.array_ar < 0.8 * arcorr and points:
# sb_array_score = 8.0
elif array_ar > 1.2 * arcorr:
l = arcorr * 1.2 - amaxar
s = 8. / l
sb_array_score = (array_ar - amaxar) * s
else:
print("What happened with?")
sb_array_score = -1.
narray = 0.
for a in confDf['ALMA_RB_03'].values:
if aminar < a < amaxar:
narray += 1
if narray == 0:
print "Warning, no official configuration for %s" % name
elif narray == 1 and sb_array_score < 8.5:
sb_array_score = 9.
# set exec score:
sb_exec_score = self.exec_prio[execu]
# set condition score:
pwv_corr = 1 - (abs(self.pwv - maxpwvc) / 4.)
if pwv_corr < 0.1:
pwv_corr = 0.1
if frac < 1:
x = frac - 1.
sb_cond_score = 10 * (1 - (x ** 10.)) * pwv_corr
elif frac == 1:
sb_cond_score = 10.
else:
x = frac - 1
if frac <= 1.3:
sb_cond_score = (1. - (x / 0.3) ** 3.) * 10. * pwv_corr
else:
sb_cond_score = 0.
sb_ha_scorer = ((np.cos(np.radians((ha + 1) * 15.)) - 0.3) /
(1 - 0.3)) * 10.
score = (0.35 * sb_cond_score +
0.05 * sb_array_score +
0.20 * sb_completion_score +
0.05 * sb_exec_score +
0.05 * sb_science_score +
0.20 * sb_grade_score +
0.10 * sb_ha_scorer)
# 0.35, 0.20, 0.10, 0.05, 0.05, 0.15, 0.10
return (sb_cond_score, sb_array_score, sb_completion_score,
sb_ha_scorer, sb_exec_score, sb_science_score, sb_grade_score,
arcorr_or, score, lascorr)
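# Worked example added for clarity (not part of the original code), using the
# weights applied in `score` above: an SB with sb_cond_score=10, sb_array_score=10,
# sb_completion_score=7, sb_exec_score=10, sb_science_score=5, sb_grade_score=10
# and sb_ha_scorer=8 gets
#     0.35*10 + 0.05*10 + 0.20*7 + 0.05*10 + 0.05*5 + 0.20*10 + 0.10*8 = 8.95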
def check_observability(self, array):
"""
:param array:
:return:
"""
if self.date == self.old_date:
return None
alma1.date = self.date
print(alma1.date)
if array == '12m':
fs_arr = self.fieldsource.query(
'arraySB == "TWELVE-M" and isQuery == False')
elif array == '7m':
fs_arr = self.fieldsource.query('arraySB == "ACA" or '
'arraySB == "SEVEN-M"')
else:
fs_arr = self.fieldsource.query('arraySB == "TP-Array"')
print("Calculating observability for %d sources..." %
len(fs_arr))
fs = fs_arr.apply(
lambda r: observable(
r['solarSystem'], r['sourcename'], r['RA'], r['DEC'],
self.horizon, r['isQuery'], r['ephemeris'], alma=alma1),
axis=1)
df_fs = pd.DataFrame(
fs.values.tolist(),
index=fs.index,
columns=['RA', 'DEC', 'elev', 'remaining', 'rise', 'sets', 'lstr',
'lsts', 'observable'])
fs_1 = pd.merge(
fs_arr[['fieldRef', 'SB_UID', 'isQuery', 'name', 'intendedUse']],
df_fs, left_index=True, right_index=True,
how='left')
# Lines 636-654 + donotuse query, workaround for multiple field sources
# noinspection PyUnusedLocal
donotuse = [
'Pointing Template (Cal Group)',
'Pointing Template (Science Group)', 'Amplitude', 'Phase',
'Bandpass', 'J0423 Bandpass', 'J2258 2nd Bandpass',
'Pointing', 'Check source', 'Phase calibrator', 'J1427-4206',
'Bandpass calibrator', 'AmpAux_3', 'AmpAux_1', 'PhaseCal',
'Calibrator: ', 'Polarization', 'AmpCheck1', 'AmpCheck2',
'Polarization calibrator', 'Delay', 'Focus', 'Amplitude query',
'calibrator_J1256', 'J192451-291428', 'J1911-2006', 'CheckSource',
'Check', 'check', 'J0854+201', 'phase_and_checksource',
'phase_and_checksource queries', 'phase+and_checksource',
'Bandpass_3C279', 'Field Check source ', 'Phase 2', 'Phase 3',
'Phase P', 'Bandpass (J1924-2914)', 'Pluto_May21-Jun01',
'Pluto_Jun01-11', 'Pluto_Jun11-Jun21', 'Pluto_Jun21-Jul01',
'Pluto_Jul11-21', 'Pluto_Jul21-Aug01', 'Pluto_Aug01-11',
'Pluto_Aug11-21', 'Pluto_Aug21-Sep01', 'Pluto_Sep01-11',
'Pluto_Sep11-Sep21', 'Pluto_Sep21-Oct01']
self.fs_1 = fs_1.query(
'isQuery == False and name not in @donotuse').copy()
fs_1g = fs_1.query(
'isQuery == False and name not in @donotuse').groupby('SB_UID')
allup = pd.DataFrame(
fs_1g.observable.mean())
allup.columns = pd.Index([u'up'])
fs_2 = pd.merge(fs_1, allup, left_on='SB_UID', right_index=True,
how='left')
fs_2g = fs_2.query(
'isQuery == False and name not in @donotuse').groupby('SB_UID')
etime = pd.DataFrame(
fs_2g.remaining.min()[fs_2g.remaining.min() > 1.5])
etime.columns = pd.Index([u'etime'])
elevation = pd.DataFrame(
fs_2g.elev.mean())
elevation.columns = pd.Index([u'elev'])
lstr = pd.DataFrame(
fs_2g.lstr.max())
lstr.columns = pd.Index([u'lstr'])
lsts = pd.DataFrame(
fs_2g.lsts.max())
lsts.columns = pd.Index([u'lsts'])
dec = pd.DataFrame(
fs_2g.DEC.mean())
dec.columns = pd.Index([u'DEC'])
ra = pd.DataFrame(
fs_2g.RA.mean())
ra.columns = pd.Index([u'RA'])
fs_3 = pd.merge(allup, etime, right_index=True, left_index=True,
how='left')
fs_4 = pd.merge(fs_3, elevation, right_index=True,
left_index=True, how='left')
fs_5 = pd.merge(fs_4, lstr, right_index=True,
left_index=True, how='left')
self.sb_summary.loc[dec.index, 'DEC'] = dec.loc[dec.index, 'DEC']
self.sb_summary.loc[dec.index, 'RA'] = ra.loc[dec.index, 'RA']
self.obser_prop = pd.merge(fs_5, lsts, right_index=True,
left_index=True, how='left')
self.old_date = self.date
print(self.old_date, self.date)
def set_trans(self, transmission):
"""
:param transmission:
"""
self.transmission = transmission
def set_pwv(self, pwv):
"""
:param pwv:
"""
self.pwv = min(pwn, key=lambda x: abs(x - pwv))
def set_date(self, date):
"""
:param date:
"""
self.date = date
def set_arrayar(self, ar):
"""
:param ar:
"""
self.array_ar = ar
def set_minha(self, ha):
"""
:param ha:
"""
self.minha = ha
def set_maxha(self, ha):
"""
:param ha:
"""
self.maxha = ha
def set_array_ar(self, ar):
"""
:param ar:
"""
self.array_ar = ar
def query_arrays(self):
"""
"""
a = str("select se.SE_TIMESTAMP ts1, sa.SLOG_ATTR_VALUE av1, "
"se.SE_ARRAYNAME, se.SE_ID se1 from ALMA.SHIFTLOG_ENTRIES se, "
"ALMA.SLOG_ENTRY_ATTR sa "
"WHERE se.SE_TYPE=7 and se.SE_TIMESTAMP > SYSDATE - 1/1. "
"and sa.SLOG_SE_ID = se.SE_ID and sa.SLOG_ATTR_TYPE = 31 "
"and se.SE_LOCATION='OSF-AOS' and se.SE_CORRELATORTYPE = 'BL'")
try:
self.cursor.execute(a)
self.bl_arrays = pd.DataFrame(
self.cursor.fetchall(),
columns=[rec[0] for rec in self.cursor.description]
).sort('TS1', ascending=False)
except ValueError:
self.bl_arrays = pd.DataFrame(
columns=pd.Index(
[u'TS1', u'AV1', u'SE_ARRAYNAME', u'SE1'], dtype='object'))
print("No BL arrays have been created in the last 6 hours.")
b = str("select se.SE_TIMESTAMP ts1, sa.SLOG_ATTR_VALUE av1, "
"se.SE_ARRAYNAME, se.SE_ID se1 from ALMA.SHIFTLOG_ENTRIES se, "
"ALMA.SLOG_ENTRY_ATTR sa "
"WHERE se.SE_TYPE=7 and se.SE_TIMESTAMP > SYSDATE - 1/1. "
"and sa.SLOG_SE_ID = se.SE_ID and sa.SLOG_ATTR_TYPE = 31 "
"and se.SE_LOCATION='OSF-AOS' and se.SE_CORRELATORTYPE = 'ACA'")
try:
self.cursor.execute(b)
self.aca_arrays = pd.DataFrame(
self.cursor.fetchall(),
columns=[rec[0] for rec in self.cursor.description]
).sort('TS1', ascending=False)
except ValueError:
self.aca_arrays = pd.DataFrame(
columns=pd.Index(
[u'TS1', u'AV1', u'SE_ARRAYNAME', u'SE1'], dtype='object'))
print("No ACA arrays have been created in the last 6 hours.")
def set_bl_prop(self, array_name):
"""
:param array_name:
"""
# In case a bl_array is selected
if (array_name is not None and len(self.bl_arrays) != 0 and
array_name not in self.defarrays):
id1 = self.bl_arrays.query(
'SE_ARRAYNAME == "%s"' % array_name).iloc[0].SE1
# a = str("SELECT SLOG_ATTR_VALUE FROM ALMA.SLOG_ENTRY_ATTR "
# "WHERE SLOG_ATTR_TYPE = 31 "
# "AND SLOG_SE_ID=%d" % id1)
# self.cursor.execute(a)
# ap = pd.DataFrame(self.cursor.fetchall(), columns=['antenna'])
ap = self.bl_arrays.query(
'SE_ARRAYNAME == "%s" and SE1 == %d' % (array_name, id1)
)[['AV1']]
ap.rename(columns={'AV1': 'antenna'}, inplace=True)
ap = ap[ap.antenna.str.contains('CM') == False]
conf = pd.merge(self.antpad, ap,
left_on='antenna', right_on='antenna')
conf_file = self.path + '%s.txt' % array_name
conf.to_csv(conf_file, header=False,
index=False, sep=' ')
ac = rUV.ac.ArrayConfigurationCasaFile()
ac.createCasaConfig(conf_file)
self.ruv = rUV.computeRuv(conf_file + ".cfg")
self.num_bl = len(self.ruv)
self.num_ant = len(ap)
# If default or C34 is selected
else:
if array_name is None:
conf_file = self.wto_path + 'conf/default.txt'
io_file = open(
self.wto_path + 'conf/arrayConfigurationResults.txt')
lines = io_file.readlines()
self.array_ar = float(lines[13].split(':')[1])
self.num_ant = int(lines[3].split(':')[1])
print self.num_ant
self.num_bl = self.num_ant * (self.num_ant - 1.) / 2.
io_file.close()
ac = rUV.ac.ArrayConfigurationCasaFile()
ac.createCasaConfig(conf_file)
self.ruv = rUV.computeRuv(conf_file + ".cfg")
else:
conf_file = self.wto_path + 'conf/%s.txt.cfg' % array_name
self.ruv = rUV.computeRuv(conf_file)
# noinspection PyTypeChecker
self.array_ar = self.arr_ar_def[array_name]
self.num_ant = 34
self.num_bl = self.num_ant * (self.num_ant - 1.) / 2.
# if len(self.ruv) > 33. * 17.:
# self.ruv = self.ruv[-561:]
# self.num_bl = len(self.ruv)
# self.num_ant = 34
# noinspection PyPep8Naming
def observable(solarSystem, sourcename, RA, DEC, horizon, isQuery, ephemeris,
alma):
"""
:param solarSystem:
:param sourcename:
:param RA:
:param DEC:
:param horizon:
:param isQuery:
:param ephemeris:
:param alma:
:return:
"""
dtemp = alma.date
alma.horizon = ephem.degrees(str(horizon))
if isQuery:
alma.date = dtemp
return 0, 0, 0, 0, 0, 0, 0, 0, False
if solarSystem != 'Unspecified':
ra = 0
dec = 0
if solarSystem in SSO and solarSystem == sourcename:
obj = eval('ephem.' + solarSystem + '()')
obj.compute(alma)
ra = obj.ra
dec = obj.dec
elev = obj.alt
neverup = False
elif solarSystem in MOON:
obj = eval('ephem.' + solarSystem + '()')
obj.compute(alma)
ra = obj.ra
dec = obj.dec
elev = obj.alt
obj.radius = 0.
neverup = False
elif solarSystem == 'Ephemeris':
try:
ra, dec, ephe = read_ephemeris(ephemeris, alma.date)
except TypeError:
# print(ephemeris, sourcename)
ephe = False
if not ephe:
alma.date = dtemp
print("Source %s doesn't have ephemeris for current's date" %
sourcename)
return 0, 0, 0, 0, 0, 0, 0, 0, False
obj = ephem.FixedBody()
obj._ra = pd.np.deg2rad(ra)
obj._dec = pd.np.deg2rad(dec)
obj.compute(alma)
ra = obj.ra
dec = obj.dec
elev = obj.alt
neverup = obj.neverup
print sourcename, ra, dec, elev
else:
alma.date = dtemp
return 0, 0, 0, 0, 0, 0, 0, 0, False
else:
obj = ephem.FixedBody()
obj._ra = pd.np.deg2rad(RA)
obj._dec = pd.np.deg2rad(DEC)
obj.compute(alma)
ra = obj.ra
dec = obj.dec
elev = obj.alt
neverup = obj.neverup
if obj.alt > ephem.degrees(str(horizon)):
try:
c2 = obj.circumpolar
except AttributeError:
c2 = False
if not c2:
sets = alma.next_setting(obj)
rise = alma.previous_rising(obj)
remaining = sets.datetime() - dtemp.datetime()
alma.date = rise
lstr = alma.sidereal_time()
alma.date = sets
lsts = alma.sidereal_time()
obs = True
else:
remaining = timedelta(1)
lstr = ephem.hours('0')
lsts = ephem.hours('0')
rise = ephem.hours('0')
sets = ephem.hours('0')
obs = True
else:
if neverup:
print("Source %s is never over %d deg. of elev. (%s, %s, %s)" %
(sourcename, horizon, obj.dec, obj.ra, alma.date))
remaining = timedelta(0)
alma.horizon = ephem.degrees('0')
obj.compute(alma)
lstr = ephem.hours('0')
lsts = ephem.hours('0')
rise = ephem.hours('0')
sets = ephem.hours('0')
obs = False
else:
rise = alma.next_rising(obj)
sets = alma.next_setting(obj)
remaining = dtemp.datetime() - rise.datetime()
alma.date = rise
lstr = alma.sidereal_time()
alma.date = sets
lsts = alma.sidereal_time()
obs = False
alma.date = dtemp
alma.horizon = ephem.degrees(str(horizon))
return pd.np.degrees(ra), pd.np.degrees(dec), pd.np.degrees(elev),\
remaining.total_seconds() / 3600., rise, sets, lstr, lsts, obs
def read_ephemeris(ephemeris, date):
# TODO: is the ephemeris file fixed in col positions?
"""
:param ephemeris:
:param date:
:return:
"""
in_data = False
now = date
month_ints = {
'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
found = False
for line in ephemeris.split('\n'):
if line.startswith('$$SOE'):
in_data = True
c1 = 0
elif line.startswith('$$EOE') or line.startswith(' $$EOE'):
if not found:
ra = ephem.hours('00:00:00')
dec = ephem.degrees('00:00:00')
ephe = False
return ra, dec, ephe
elif in_data:
datestr = line[1:6] + str(month_ints[line[6:9]]) + line[9:18]
date = datetime.strptime(datestr, '%Y-%m-%d %H:%M')
if now.datetime() > date:
data = line
found = False
# noinspection PyUnboundLocalVariable
c1 += 1
else:
# noinspection PyUnboundLocalVariable
if c1 == 0:
ra = ephem.hours('00:00:00')
dec = ephem.degrees('00:00:00')
ephe = False
return ra, dec, ephe
# noinspection PyUnboundLocalVariable
ra_temp = data[23:36].strip()
dec_temp = data[37:50].strip()
if len(ra_temp.split()) > 3:
ra_temp = data[23:34].strip()
dec_temp = data[35:46].strip()
ra = ephem.hours(ra_temp.replace(' ', ':'))
dec = ephem.degrees(dec_temp.replace(' ', ':'))
ephe = True
print(ra, dec, ephe, now)
return pd.np.degrees(ra), pd.np.degrees(dec), ephe
def ret_cycle(code, blfrac):
if code[:4] == '2012':
return blfrac * (31. * 16.) / (33. * 17)
else:
return blfrac * 1.
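# Note added for clarity (not part of the original module): ret_cycle rescales the
# baseline fraction of Cycle 1 SBs (project codes starting with "2012") from the
# 496-baseline normalisation (31. * 16., i.e. 32 antennas) to the 561-baseline one
# (33. * 17, i.e. 34 antennas) used elsewhere in this module. For a hypothetical code:
#     ret_cycle('2012.1.00001.S', 1.0)   # -> 496. / 561. ~= 0.884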
|
gpl-2.0
|
kevin-intel/scikit-learn
|
sklearn/metrics/pairwise.py
|
2
|
69283
|
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
from functools import partial
import warnings
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from joblib import Parallel, effective_n_jobs
from ..utils.validation import _num_samples
from ..utils.validation import check_non_negative
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches, get_chunk_n_rows
from ..utils import is_scalar_nan
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..utils._mask import _get_mask
from ..utils.fixes import delayed
from ..utils.fixes import sp_version, parse_version
from ._pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
from ..exceptions import DataConversionWarning
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = float
return X, Y, dtype
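# Illustrative sketch (added for clarity; not part of the original module):
# the float32 fast path is only taken when *both* inputs end up as float32 arrays.
def _example_return_float_dtype():
    X32 = np.ones((2, 3), dtype=np.float32)
    Y64 = np.ones((2, 3), dtype=np.float64)
    _, _, dtype_a = _return_float_dtype(X32, X32.copy())  # np.float32
    _, _, dtype_b = _return_float_dtype(X32, Y64)         # float (i.e. float64)
    return dtype_a, dtype_b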
def check_pairwise_arrays(X, Y, *, precomputed=False, dtype=None,
accept_sparse='csr', force_all_finite=True,
copy=False):
"""Set X and Y appropriately and checks inputs.
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
precomputed : bool, default=False
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : str, type, list of type, default=None
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
accept_sparse : str, bool or list/tuple of str, default='csr'
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.22
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
.. versionadded:: 0.22
Returns
-------
safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
estimator=estimator)
else:
X = check_array(X, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
estimator=estimator)
Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
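# Illustrative usage sketch (not from the upstream docstring; assumes the
# module-level numpy import): when Y is None the validated Y is the very same
# array object as the validated X, as described above.
#
#   >>> A = np.array([[1., 2.], [3., 4.]])
#   >>> Xc, Yc = check_pairwise_arrays(A, None)
#   >>> Xc is Yc
#   True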
def check_paired_arrays(X, Y):
"""Set X and Y appropriately and checks inputs for paired distances.
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
Returns
-------
safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, *, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation,
because this equation potentially suffers from "catastrophic cancellation".
Also, the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \
default=None
Y_norm_squared : array-like of shape (n_samples_Y,) or (n_samples_Y, 1) \
or (1, n_samples_Y), default=None
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
May be ignored in some cases, see the note below.
squared : bool, default=False
Return squared Euclidean distances.
X_norm_squared : array-like of shape (n_samples_X,) or (n_samples_X, 1) \
or (1, n_samples_X), default=None
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
May be ignored in some cases, see the note below.
Notes
-----
To achieve better accuracy, `X_norm_squared` and `Y_norm_squared` may be
unused if they are passed as ``float32``.
Returns
-------
distances : ndarray of shape (n_samples_X, n_samples_Y)
See Also
--------
    paired_distances : Distances between pairs of elements of X and Y.
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[0., 1.],
[1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[1. ],
[1.41421356]])
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
X_norm_squared = check_array(X_norm_squared, ensure_2d=False)
original_shape = X_norm_squared.shape
if X_norm_squared.shape == (X.shape[0],):
X_norm_squared = X_norm_squared.reshape(-1, 1)
if X_norm_squared.shape == (1, X.shape[0]):
X_norm_squared = X_norm_squared.T
if X_norm_squared.shape != (X.shape[0], 1):
raise ValueError(
f"Incompatible dimensions for X of shape {X.shape} and "
f"X_norm_squared of shape {original_shape}.")
if Y_norm_squared is not None:
Y_norm_squared = check_array(Y_norm_squared, ensure_2d=False)
original_shape = Y_norm_squared.shape
if Y_norm_squared.shape == (Y.shape[0],):
Y_norm_squared = Y_norm_squared.reshape(1, -1)
if Y_norm_squared.shape == (Y.shape[0], 1):
Y_norm_squared = Y_norm_squared.T
if Y_norm_squared.shape != (1, Y.shape[0]):
raise ValueError(
f"Incompatible dimensions for Y of shape {Y.shape} and "
f"Y_norm_squared of shape {original_shape}.")
return _euclidean_distances(X, Y, X_norm_squared, Y_norm_squared, squared)
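# Illustrative sketch of the pre-computed norms mentioned above (assumes
# float64 inputs so the norms are actually used; float32 norms are ignored,
# see _euclidean_distances below):
#
#   >>> X = np.array([[0., 1.], [1., 1.]])
#   >>> XX = (X ** 2).sum(axis=1)
#   >>> euclidean_distances(X, X, X_norm_squared=XX, Y_norm_squared=XX)
#   array([[0., 1.],
#          [1., 0.]])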
def _euclidean_distances(X, Y, X_norm_squared=None, Y_norm_squared=None,
squared=False):
"""Computational part of euclidean_distances
Assumes inputs are already checked.
If norms are passed as float32, they are unused. If arrays are passed as
    float32, norms need to be recomputed on upcast chunks.
TODO: use a float64 accumulator in row_norms to avoid the latter.
"""
if X_norm_squared is not None:
if X_norm_squared.dtype == np.float32:
XX = None
else:
XX = X_norm_squared.reshape(-1, 1)
elif X.dtype == np.float32:
XX = None
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if Y is X:
YY = None if XX is None else XX.T
else:
if Y_norm_squared is not None:
if Y_norm_squared.dtype == np.float32:
YY = None
else:
YY = Y_norm_squared.reshape(1, -1)
elif Y.dtype == np.float32:
YY = None
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X.dtype == np.float32:
# To minimize precision issues with float32, we compute the distance
# matrix on chunks of X and Y upcast to float64
distances = _euclidean_distances_upcast(X, XX, Y, YY)
else:
# if dtype is already float64, no need to chunk and upcast
distances = - 2 * safe_sparse_dot(X, Y.T, dense_output=True)
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
if X is Y:
np.fill_diagonal(distances, 0)
return distances if squared else np.sqrt(distances, out=distances)
def nan_euclidean_distances(X, Y=None, *, squared=False,
missing_values=np.nan, copy=True):
"""Calculate the euclidean distances in the presence of missing values.
Compute the euclidean distance between each pair of samples in X and Y,
where Y=X is assumed if Y=None. When calculating the distance between a
pair of samples, this formulation ignores feature coordinates with a
missing value in either sample and scales up the weight of the remaining
coordinates:
dist(x,y) = sqrt(weight * sq. distance from present coordinates)
where,
weight = Total # of coordinates / # of present coordinates
For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]``
is:
.. math::
\\sqrt{\\frac{4}{2}((3-1)^2 + (6-5)^2)}
If all the coordinates are missing or if there are no common present
coordinates then NaN is returned for that pair.
Read more in the :ref:`User Guide <metrics>`.
.. versionadded:: 0.22
Parameters
----------
X : array-like of shape=(n_samples_X, n_features)
Y : array-like of shape=(n_samples_Y, n_features), default=None
squared : bool, default=False
Return squared Euclidean distances.
missing_values : np.nan or int, default=np.nan
Representation of missing value.
copy : bool, default=True
Make and use a deep copy of X and Y (if Y exists).
Returns
-------
distances : ndarray of shape (n_samples_X, n_samples_Y)
See Also
--------
paired_distances : Distances between pairs of elements of X and Y.
Examples
--------
>>> from sklearn.metrics.pairwise import nan_euclidean_distances
>>> nan = float("NaN")
>>> X = [[0, 1], [1, nan]]
>>> nan_euclidean_distances(X, X) # distance between rows of X
array([[0. , 1.41421356],
[1.41421356, 0. ]])
>>> # get distance to origin
>>> nan_euclidean_distances(X, [[0, 0]])
array([[1. ],
[1.41421356]])
References
----------
* John K. Dixon, "Pattern Recognition with Partly Missing Data",
IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue:
10, pp. 617 - 621, Oct. 1979.
http://ieeexplore.ieee.org/abstract/document/4310090/
"""
force_all_finite = 'allow-nan' if is_scalar_nan(missing_values) else True
X, Y = check_pairwise_arrays(X, Y, accept_sparse=False,
force_all_finite=force_all_finite, copy=copy)
# Get missing mask for X
missing_X = _get_mask(X, missing_values)
# Get missing mask for Y
missing_Y = missing_X if Y is X else _get_mask(Y, missing_values)
# set missing values to zero
X[missing_X] = 0
Y[missing_Y] = 0
distances = euclidean_distances(X, Y, squared=True)
# Adjust distances for missing values
XX = X * X
YY = Y * Y
distances -= np.dot(XX, missing_Y.T)
distances -= np.dot(missing_X, YY.T)
np.clip(distances, 0, None, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
np.fill_diagonal(distances, 0.0)
present_X = 1 - missing_X
present_Y = present_X if Y is X else ~missing_Y
present_count = np.dot(present_X, present_Y.T)
distances[present_count == 0] = np.nan
# avoid divide by zero
np.maximum(1, present_count, out=present_count)
distances /= present_count
distances *= X.shape[1]
if not squared:
np.sqrt(distances, out=distances)
return distances
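# Worked check of the weighting formula in the docstring above (illustrative):
# between [3, nan, nan, 6] and [1, nan, 4, 5] only coordinates 0 and 3 are
# present in both samples, so the distance is
# sqrt((4 / 2) * ((3 - 1)**2 + (6 - 5)**2)) = sqrt(10) ~= 3.1623.
#
#   >>> nan = float("nan")
#   >>> nan_euclidean_distances([[3, nan, nan, 6]], [[1, nan, 4, 5]])
#   array([[3.16227766]])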
def _euclidean_distances_upcast(X, XX=None, Y=None, YY=None, batch_size=None):
"""Euclidean distances between X and Y.
Assumes X and Y have float32 dtype.
Assumes XX and YY have float64 dtype or are None.
    X and Y are upcast to float64 by chunks, whose size is chosen to limit
memory increase by approximately 10% (at least 10MiB).
"""
n_samples_X = X.shape[0]
n_samples_Y = Y.shape[0]
n_features = X.shape[1]
distances = np.empty((n_samples_X, n_samples_Y), dtype=np.float32)
if batch_size is None:
x_density = X.nnz / np.prod(X.shape) if issparse(X) else 1
y_density = Y.nnz / np.prod(Y.shape) if issparse(Y) else 1
# Allow 10% more memory than X, Y and the distance matrix take (at
# least 10MiB)
maxmem = max(
((x_density * n_samples_X + y_density * n_samples_Y) * n_features
+ (x_density * n_samples_X * y_density * n_samples_Y)) / 10,
10 * 2 ** 17)
# The increase amount of memory in 8-byte blocks is:
# - x_density * batch_size * n_features (copy of chunk of X)
# - y_density * batch_size * n_features (copy of chunk of Y)
# - batch_size * batch_size (chunk of distance matrix)
# Hence x² + (xd+yd)kx = M, where x=batch_size, k=n_features, M=maxmem
# xd=x_density and yd=y_density
tmp = (x_density + y_density) * n_features
batch_size = (-tmp + np.sqrt(tmp ** 2 + 4 * maxmem)) / 2
batch_size = max(int(batch_size), 1)
x_batches = gen_batches(n_samples_X, batch_size)
for i, x_slice in enumerate(x_batches):
X_chunk = X[x_slice].astype(np.float64)
if XX is None:
XX_chunk = row_norms(X_chunk, squared=True)[:, np.newaxis]
else:
XX_chunk = XX[x_slice]
y_batches = gen_batches(n_samples_Y, batch_size)
for j, y_slice in enumerate(y_batches):
if X is Y and j < i:
# when X is Y the distance matrix is symmetric so we only need
# to compute half of it.
d = distances[y_slice, x_slice].T
else:
Y_chunk = Y[y_slice].astype(np.float64)
if YY is None:
YY_chunk = row_norms(Y_chunk, squared=True)[np.newaxis, :]
else:
YY_chunk = YY[:, y_slice]
d = -2 * safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True)
d += XX_chunk
d += YY_chunk
distances[x_slice, y_slice] = d.astype(np.float32, copy=False)
return distances
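# Worked example of the batch_size formula above (illustrative numbers, not
# derived from any particular dataset): with dense inputs
# (x_density = y_density = 1), n_features = 100 and the minimum budget
# maxmem = 10 * 2**17 blocks, tmp = 200 and
# batch_size = (-200 + sqrt(200**2 + 4 * 10 * 2**17)) / 2 ~= 1049,
# so batch_size**2 + tmp * batch_size ~= maxmem, as intended.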
def _argmin_min_reduce(dist, start):
indices = dist.argmin(axis=1)
values = dist[np.arange(dist.shape[0]), indices]
return indices, values
def pairwise_distances_argmin_min(X, Y, *, axis=1, metric="euclidean",
metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Array containing points.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
Array containing points.
axis : int, default=1
Axis along which the argmin and distances are to be computed.
metric : str or callable, default='euclidean'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See Also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
indices, values = zip(*pairwise_distances_chunked(
X, Y, reduce_func=_argmin_min_reduce, metric=metric,
**metric_kwargs))
indices = np.concatenate(indices)
values = np.concatenate(values)
return indices, values
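# Minimal usage sketch (illustrative values, chosen for easy verification):
#
#   >>> X = [[0., 0.], [1., 1.]]
#   >>> Y = [[1., 0.], [2., 2.]]
#   >>> argmin, dist = pairwise_distances_argmin_min(X, Y)
#   >>> argmin
#   array([0, 0])
#   >>> dist
#   array([1., 1.])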
def pairwise_distances_argmin(X, Y, *, axis=1, metric="euclidean",
metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Array containing points.
Y : array-like of shape (n_samples_Y, n_features)
Arrays containing points.
axis : int, default=1
Axis along which the argmin and distances are to be computed.
metric : str or callable, default="euclidean"
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See Also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis=axis, metric=metric,
metric_kwargs=metric_kwargs)[0]
def haversine_distances(X, Y=None):
"""Compute the Haversine distance between samples in X and Y.
The Haversine (or great circle) distance is the angular distance between
two points on the surface of a sphere. The first coordinate of each point
is assumed to be the latitude, the second is the longitude, given
in radians. The dimension of the data must be 2.
.. math::
D(x, y) = 2\\arcsin[\\sqrt{\\sin^2((x1 - y1) / 2)
+ \\cos(x1)\\cos(y1)\\sin^2((x2 - y2) / 2)}]
Parameters
----------
X : array-like of shape (n_samples_X, 2)
Y : array-like of shape (n_samples_Y, 2), default=None
Returns
-------
distance : ndarray of shape (n_samples_X, n_samples_Y)
Notes
-----
As the Earth is nearly spherical, the haversine formula provides a good
    approximation of the distance between two points on the Earth's surface,
    with less than 1% error on average.
Examples
--------
We want to calculate the distance between the Ezeiza Airport
(Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris,
France).
>>> from sklearn.metrics.pairwise import haversine_distances
>>> from math import radians
>>> bsas = [-34.83333, -58.5166646]
>>> paris = [49.0083899664, 2.53844117956]
>>> bsas_in_radians = [radians(_) for _ in bsas]
>>> paris_in_radians = [radians(_) for _ in paris]
>>> result = haversine_distances([bsas_in_radians, paris_in_radians])
>>> result * 6371000/1000 # multiply by Earth radius to get kilometers
array([[ 0. , 11099.54035582],
[11099.54035582, 0. ]])
"""
from ..neighbors import DistanceMetric
return DistanceMetric.get_metric('haversine').pairwise(X, Y)
def manhattan_distances(X, Y=None, *, sum_over_features=True):
"""Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array-like of shape (n_samples_Y, n_features), default=None
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
Returns
-------
D : ndarray of shape (n_samples_X * n_samples_Y, n_features) or \
(n_samples_X, n_samples_Y)
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Notes
    -----
When X and/or Y are CSR sparse matrices and they are not already
in canonical format, this function modifies them in-place to
make them canonical.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])
array([[0.]])
>>> manhattan_distances([[3]], [[2]])
array([[1.]])
>>> manhattan_distances([[2]], [[3]])
array([[1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])
array([[0., 2.],
[4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = np.full((2, 2), 2.)
>>> manhattan_distances(X, y, sum_over_features=False)
array([[1., 1.],
[1., 1.]])
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
X.sum_duplicates() # this also sorts indices in-place
Y.sum_duplicates()
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Matrix `X`.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \
default=None
Matrix `Y`.
Returns
-------
distance matrix : ndarray of shape (n_samples_X, n_samples_Y)
See Also
--------
cosine_similarity
scipy.spatial.distance.cosine : Dense matrices only.
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
np.clip(S, 0, 2, out=S)
if X is Y or Y is None:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
S[np.diag_indices_from(S)] = 0.0
return S
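# Minimal usage sketch (illustrative): orthogonal unit vectors are at cosine
# distance 1 from each other and 0 from themselves.
#
#   >>> cosine_distances([[1., 0.], [0., 1.]])
#   array([[0., 1.],
#          [1., 0.]])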
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Y : array-like of shape (n_samples, n_features)
Returns
-------
distances : ndarray of shape (n_samples,)
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Y : array-like of shape (n_samples, n_features)
Returns
-------
distances : ndarray of shape (n_samples,)
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Y : array-like of shape (n_samples, n_features)
Returns
-------
distances : ndarray of shape (n_samples,)
Notes
-----
    The cosine distance is equivalent to half the squared euclidean
    distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
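# Illustrative check of the note above (uses the module-level `normalize`
# import): on unit-normalized rows the paired cosine distance equals half the
# paired squared euclidean distance.
#
#   >>> X = np.array([[1., 1.]])
#   >>> Y = np.array([[1., 0.]])
#   >>> paired_cosine_distances(X, Y)
#   array([0.29289322])
#   >>> 0.5 * ((normalize(X) - normalize(Y)) ** 2).sum(axis=1)
#   array([0.29289322])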
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, *, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray of shape (n_samples, n_features)
Array 2 for distance computation.
metric : str or callable, default="euclidean"
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray of shape (n_samples,)
See Also
--------
pairwise_distances : Computes the distance between every pair of samples.
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([0., 1.])
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None, dense_output=True):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
dense_output : bool, default=True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.20
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=dense_output)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
degree : int, default=3
gamma : float, default=None
If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
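# Worked check of the kernel formula above (illustrative numbers):
# <x, y> = 11, so K = (0.5 * 11 + 1) ** 2 = 42.25.
#
#   >>> polynomial_kernel([[1., 2.]], [[3., 4.]], degree=2, gamma=0.5, coef0=1)
#   array([[42.25]])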
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
gamma : float, default=None
If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
gamma : float, default=None
If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
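# Worked check of the kernel formula above (illustrative): ||x - y||^2 = 1,
# so with gamma=1 the kernel value is exp(-1).
#
#   >>> rbf_kernel([[0., 0.]], [[0., 1.]], gamma=1.0)
#   array([[0.36787944]])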
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
gamma : float, default=None
If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples_X, n_features)
Input data.
Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features), \
default=None
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : bool, default=True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter ``dense_output`` for dense output.
Returns
-------
kernel matrix : ndarray of shape (n_samples_X, n_samples_Y)
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T,
dense_output=dense_output)
return K
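# Illustrative check of the statement above that cosine similarity equals the
# linear kernel on L2-normalized data (uses the module-level imports):
#
#   >>> X = np.array([[3., 4.], [0., 2.]])
#   >>> np.allclose(cosine_similarity(X), linear_kernel(normalize(X)))
#   True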
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and
Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
See Also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
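# Worked check of the kernel formula above (illustrative):
# k = -((1 - 2)**2 / (1 + 2) + (2 - 2)**2 / (2 + 2)) = -1/3.
#
#   >>> additive_chi2_kernel(np.array([[1., 2.]]), np.array([[2., 2.]]))
#   array([[-0.33333333]])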
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
See Also
--------
additive_chi2_kernel : The additive version of this kernel.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'haversine': haversine_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
'nan_euclidean': nan_euclidean_distances,
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'haversine' metrics.pairwise.haversine_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
'nan_euclidean' metrics.pairwise.nan_euclidean_distances
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _dist_wrapper(dist_func, dist_matrix, slice_, *args, **kwargs):
"""Write in-place to a slice of a distance matrix."""
dist_matrix[:, slice_] = dist_func(*args, **kwargs)
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel."""
if Y is None:
Y = X
X, Y, dtype = _return_float_dtype(X, Y)
if effective_n_jobs(n_jobs) == 1:
return func(X, Y, **kwds)
# enforce a threading backend to prevent data communication overhead
fd = delayed(_dist_wrapper)
ret = np.empty((X.shape[0], Y.shape[0]), dtype=dtype, order='F')
Parallel(backend="threading", n_jobs=n_jobs)(
fd(func, ret, s, X, Y[s], **kwds)
for s in gen_even_slices(_num_samples(Y), effective_n_jobs(n_jobs)))
if (X is Y or Y is None) and func is euclidean_distances:
# zeroing diagonal for euclidean norm.
# TODO: do it also for other norms.
np.fill_diagonal(ret, 0)
return ret
def _pairwise_callable(X, Y, metric, force_all_finite=True, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}.
"""
X, Y = check_pairwise_arrays(X, Y, force_all_finite=force_all_finite)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski",
'nan_euclidean', 'haversine']
_NAN_METRICS = ['nan_euclidean']
def _check_chunk_size(reduced, chunk_size):
"""Checks chunk is a sequence of expected size or a tuple of same.
"""
if reduced is None:
return
is_tuple = isinstance(reduced, tuple)
if not is_tuple:
reduced = (reduced,)
if any(isinstance(r, tuple) or not hasattr(r, '__iter__')
for r in reduced):
raise TypeError('reduce_func returned %r. '
'Expected sequence(s) of length %d.' %
(reduced if is_tuple else reduced[0], chunk_size))
if any(_num_samples(r) != chunk_size for r in reduced):
actual_size = tuple(_num_samples(r) for r in reduced)
raise ValueError('reduce_func returned object of length %s. '
'Expected same length as input: %d.' %
(actual_size if is_tuple else actual_size[0],
chunk_size))
def _precompute_metric_params(X, Y, metric=None, **kwds):
"""Precompute data-derived metric parameters if not provided.
"""
if metric == "seuclidean" and 'V' not in kwds:
# There is a bug in scipy < 1.5 that will cause a crash if
# X.dtype != np.double (float64). See PR #15730
dtype = np.float64 if sp_version < parse_version('1.5') else None
if X is Y:
V = np.var(X, axis=0, ddof=1, dtype=dtype)
else:
raise ValueError(
"The 'V' parameter is required for the seuclidean metric "
"when Y is passed.")
return {'V': V}
if metric == "mahalanobis" and 'VI' not in kwds:
if X is Y:
VI = np.linalg.inv(np.cov(X.T)).T
else:
raise ValueError(
"The 'VI' parameter is required for the mahalanobis metric "
"when Y is passed.")
return {'VI': VI}
return {}
def pairwise_distances_chunked(X, Y=None, *, reduce_func=None,
metric='euclidean', n_jobs=None,
working_memory=None, **kwds):
"""Generate a distance matrix chunk by chunk with optional reduction.
In cases where not all of a pairwise distance matrix needs to be stored at
once, this is used to calculate pairwise distances in
``working_memory``-sized chunks. If ``reduce_func`` is given, it is run
on each chunk and its return values are concatenated into lists, arrays
or sparse matrices.
Parameters
----------
X : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_features)
Array of pairwise distances between samples, or a feature array.
        The shape of the array should be (n_samples_X, n_samples_X) if
metric='precomputed' and (n_samples_X, n_features) otherwise.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. Only allowed if
metric != "precomputed".
reduce_func : callable, default=None
The function which is applied on each chunk of the distance matrix,
reducing it to needed values. ``reduce_func(D_chunk, start)``
is called repeatedly, where ``D_chunk`` is a contiguous vertical
slice of the pairwise distance matrix, starting at row ``start``.
It should return one of: None; an array, a list, or a sparse matrix
of length ``D_chunk.shape[0]``; or a tuple of such objects. Returning
None is useful for in-place operations, rather than reductions.
If None, pairwise_distances_chunked returns a generator of vertical
chunks of the distance matrix.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
working_memory : int, default=None
The sought maximum memory for temporary distance matrix chunks.
When None (default), the value of
``sklearn.get_config()['working_memory']`` is used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Yields
------
D_chunk : {ndarray, sparse matrix}
A contiguous slice of distance matrix, optionally processed by
``reduce_func``.
Examples
--------
Without reduce_func:
>>> import numpy as np
>>> from sklearn.metrics import pairwise_distances_chunked
>>> X = np.random.RandomState(0).rand(5, 3)
>>> D_chunk = next(pairwise_distances_chunked(X))
>>> D_chunk
array([[0. ..., 0.29..., 0.41..., 0.19..., 0.57...],
[0.29..., 0. ..., 0.57..., 0.41..., 0.76...],
[0.41..., 0.57..., 0. ..., 0.44..., 0.90...],
[0.19..., 0.41..., 0.44..., 0. ..., 0.51...],
[0.57..., 0.76..., 0.90..., 0.51..., 0. ...]])
Retrieve all neighbors and average distance within radius r:
>>> r = .2
>>> def reduce_func(D_chunk, start):
... neigh = [np.flatnonzero(d < r) for d in D_chunk]
... avg_dist = (D_chunk * (D_chunk < r)).mean(axis=1)
... return neigh, avg_dist
>>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func)
>>> neigh, avg_dist = next(gen)
>>> neigh
[array([0, 3]), array([1]), array([2]), array([0, 3]), array([4])]
>>> avg_dist
array([0.039..., 0. , 0. , 0.039..., 0. ])
Where r is defined per sample, we need to make use of ``start``:
>>> r = [.2, .4, .4, .3, .1]
>>> def reduce_func(D_chunk, start):
... neigh = [np.flatnonzero(d < r[i])
... for i, d in enumerate(D_chunk, start)]
... return neigh
>>> neigh = next(pairwise_distances_chunked(X, reduce_func=reduce_func))
>>> neigh
[array([0, 3]), array([0, 1]), array([2]), array([0, 3]), array([4])]
Force row-by-row generation by reducing ``working_memory``:
>>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func,
... working_memory=0)
>>> next(gen)
[array([0, 3])]
>>> next(gen)
[array([0, 1])]
"""
n_samples_X = _num_samples(X)
if metric == 'precomputed':
slices = (slice(0, n_samples_X),)
else:
if Y is None:
Y = X
# We get as many rows as possible within our working_memory budget to
# store len(Y) distances in each row of output.
#
# Note:
# - this will get at least 1 row, even if 1 row of distances will
# exceed working_memory.
# - this does not account for any temporary memory usage while
# calculating distances (e.g. difference of vectors in manhattan
        # distance).
chunk_n_rows = get_chunk_n_rows(row_bytes=8 * _num_samples(Y),
max_n_rows=n_samples_X,
working_memory=working_memory)
slices = gen_batches(n_samples_X, chunk_n_rows)
# precompute data-derived metric params
params = _precompute_metric_params(X, Y, metric=metric, **kwds)
kwds.update(**params)
for sl in slices:
if sl.start == 0 and sl.stop == n_samples_X:
X_chunk = X # enable optimised paths for X is Y
else:
X_chunk = X[sl]
D_chunk = pairwise_distances(X_chunk, Y, metric=metric,
n_jobs=n_jobs, **kwds)
if ((X is Y or Y is None)
and PAIRWISE_DISTANCE_FUNCTIONS.get(metric, None)
is euclidean_distances):
# zeroing diagonal, taking care of aliases of "euclidean",
# i.e. "l2"
D_chunk.flat[sl.start::_num_samples(X) + 1] = 0
if reduce_func is not None:
chunk_size = D_chunk.shape[0]
D_chunk = reduce_func(D_chunk, sl.start)
_check_chunk_size(D_chunk, chunk_size)
yield D_chunk
def pairwise_distances(X, Y=None, metric="euclidean", *, n_jobs=None,
force_all_finite=True, **kwds):
"""Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix
inputs.
['nan_euclidean'] but it does not yet support sparse matrices.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_features)
Array of pairwise distances between samples, or a feature array.
The shape of the array should be (n_samples_X, n_samples_X) if
metric == "precomputed" and (n_samples_X, n_features) otherwise.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. Only allowed if
metric != "precomputed".
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. Ignored
for a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.22
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_samples_Y)
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
See Also
--------
pairwise_distances_chunked : Performs the same calculation as this
function, but returns a generator of chunks of the distance matrix, in
order to limit memory usage.
paired_distances : Computes the distances between corresponding elements
of two arrays.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True,
force_all_finite=force_all_finite)
whom = ("`pairwise_distances`. Precomputed distance "
" need to have non-negative values.")
check_non_negative(X, whom=whom)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric,
force_all_finite=force_all_finite, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
if (dtype == bool and
(X.dtype != bool or (Y is not None and Y.dtype != bool))):
msg = "Data was converted to boolean for metric %s" % metric
warnings.warn(msg, DataConversionWarning)
X, Y = check_pairwise_arrays(X, Y, dtype=dtype,
force_all_finite=force_all_finite)
# precompute data-derived metric params
params = _precompute_metric_params(X, Y, metric=metric, **kwds)
kwds.update(**params)
if effective_n_jobs(n_jobs) == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
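# Minimal usage sketch (illustrative): a built-in metric name and an
# equivalent user-supplied callable produce the same distance matrix.
#
#   >>> X = [[0., 1.], [1., 1.]]
#   >>> pairwise_distances(X, metric='manhattan')
#   array([[0., 1.],
#          [1., 0.]])
#   >>> pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
#   array([[0., 1.],
#          [1., 0.]])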
# These distances require boolean arrays, when using scipy.spatial.distance
PAIRWISE_BOOLEAN_FUNCTIONS = [
'dice',
'jaccard',
'kulsinski',
'matching',
'rogerstanimoto',
'russellrao',
'sokalmichener',
'sokalsneath',
'yule',
]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
"""Valid metrics for pairwise_kernels.
    This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": frozenset(["gamma"]),
"cosine": (),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", *, filter_params=False,
n_jobs=None, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are:
['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
'laplacian', 'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_features)
Array of pairwise kernels between samples, or a feature array.
The shape of the array should be (n_samples_X, n_samples_X) if
metric == "precomputed" and (n_samples_X, n_features) otherwise.
Y : ndarray of shape (n_samples_Y, n_features), default=None
A second feature array only if X has shape (n_samples_X, n_features).
metric : str or callable, default="linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two rows from X as input and return the corresponding
kernel value as a single number. This means that callables from
:mod:`sklearn.metrics.pairwise` are not allowed, as they operate on
matrices, not single samples. Use the string identifying the kernel
instead.
filter_params : bool, default=False
Whether to filter invalid parameters or not.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
**kwds : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_samples_Y)
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = {k: kwds[k] for k in kwds
if k in KERNEL_PARAMS[metric]}
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
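# Minimal usage sketch (illustrative): the Gram matrix of the linear kernel
# on two 2-D samples.
#
#   >>> pairwise_kernels([[0., 1.], [1., 1.]], metric='linear')
#   array([[1., 1.],
#          [1., 2.]])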
|
bsd-3-clause
|
eg-zhang/scikit-learn
|
sklearn/linear_model/passive_aggressive.py
|
97
|
10879
|
# Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
    fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
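# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Minimal fit/predict calls against the two estimators above; the random data
# and hyper-parameter values are illustrative assumptions only.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y_clf = (X[:, 0] > 0).astype(int)
    y_reg = X[:, 0] + 0.1 * rng.randn(20)
    clf = PassiveAggressiveClassifier(C=1.0, n_iter=5, random_state=0)
    clf.fit(X, y_clf)
    # partial_fit needs the full set of classes on its first call.
    clf2 = PassiveAggressiveClassifier(random_state=0)
    clf2.partial_fit(X, y_clf, classes=np.array([0, 1]))
    reg = PassiveAggressiveRegressor(C=1.0, n_iter=5, random_state=0)
    reg.fit(X, y_reg)
    assert clf.predict(X[:3]).shape == (3,)
    assert reg.predict(X[:3]).shape == (3,)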
|
bsd-3-clause
|
nmartensen/pandas
|
pandas/core/sparse/scipy_sparse.py
|
3
|
5678
|
"""
Interaction with scipy.sparse matrices.
Currently only includes SparseSeries.to_coo helpers.
"""
from pandas.core.index import MultiIndex, Index
from pandas.core.series import Series
from pandas.compat import OrderedDict, lmap
def _check_is_partition(parts, whole):
whole = set(whole)
parts = [set(x) for x in parts]
if set.intersection(*parts) != set():
raise ValueError(
'Is not a partition because intersection is not null.')
if set.union(*parts) != whole:
raise ValueError('Is not a partition because union is not the whole.')
def _to_ijv(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
""" For arbitrary (MultiIndexed) SparseSeries return
(v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for
passing to scipy.sparse.coo constructor. """
# index and column levels must be a partition of the index
_check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
# from the SparseSeries: get the labels and data for non-null entries
values = ss._data.internal_values()._valid_sp_values
nonnull_labels = ss.dropna()
def get_indexers(levels):
""" Return sparse coords and dense labels for subset levels """
# TODO: how to do this better? cleanly slice nonnull_labels given the
# coord
values_ilabels = [tuple(x[i] for i in levels)
for x in nonnull_labels.index]
if len(levels) == 1:
values_ilabels = [x[0] for x in values_ilabels]
# # performance issues with groupby ###################################
        # TODO: these two lines can replace the code below but
# groupby is too slow (in some cases at least)
# labels_to_i = ss.groupby(level=levels, sort=sort_labels).first()
# labels_to_i[:] = np.arange(labels_to_i.shape[0])
def _get_label_to_i_dict(labels, sort_labels=False):
""" Return OrderedDict of unique labels to number.
Optionally sort by label.
"""
labels = Index(lmap(tuple, labels)).unique().tolist() # squish
if sort_labels:
labels = sorted(list(labels))
d = OrderedDict((k, i) for i, k in enumerate(labels))
return (d)
def _get_index_subset_to_coord_dict(index, subset, sort_labels=False):
def robust_get_level_values(i):
# if index has labels (that are not None) use those,
# else use the level location
try:
return index.get_level_values(index.names[i])
except KeyError:
return index.get_level_values(i)
ilabels = list(zip(*[robust_get_level_values(i) for i in subset]))
labels_to_i = _get_label_to_i_dict(ilabels,
sort_labels=sort_labels)
labels_to_i = Series(labels_to_i)
if len(subset) > 1:
labels_to_i.index = MultiIndex.from_tuples(labels_to_i.index)
labels_to_i.index.names = [index.names[i] for i in subset]
else:
labels_to_i.index = Index(x[0] for x in labels_to_i.index)
labels_to_i.index.name = index.names[subset[0]]
labels_to_i.name = 'value'
return (labels_to_i)
labels_to_i = _get_index_subset_to_coord_dict(ss.index, levels,
sort_labels=sort_labels)
# #####################################################################
# #####################################################################
i_coord = labels_to_i[values_ilabels].tolist()
i_labels = labels_to_i.index.tolist()
return i_coord, i_labels
i_coord, i_labels = get_indexers(row_levels)
j_coord, j_labels = get_indexers(column_levels)
return values, i_coord, j_coord, i_labels, j_labels
def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ),
sort_labels=False):
""" Convert a SparseSeries to a scipy.sparse.coo_matrix using index
levels row_levels, column_levels as the row and column
labels respectively. Returns the sparse_matrix, row and column labels.
"""
import scipy.sparse
if ss.index.nlevels < 2:
        raise ValueError('to_coo requires MultiIndex with nlevels >= 2')
if not ss.index.is_unique:
raise ValueError('Duplicate index entries are not allowed in to_coo '
'transformation.')
# to keep things simple, only rely on integer indexing (not labels)
row_levels = [ss.index._get_level_number(x) for x in row_levels]
column_levels = [ss.index._get_level_number(x) for x in column_levels]
v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels,
column_levels=column_levels,
sort_labels=sort_labels)
sparse_matrix = scipy.sparse.coo_matrix(
(v, (i, j)), shape=(len(rows), len(columns)))
return sparse_matrix, rows, columns
def _coo_to_sparse_series(A, dense_index=False):
""" Convert a scipy.sparse.coo_matrix to a SparseSeries.
Use the defaults given in the SparseSeries constructor.
"""
s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
s = s.sort_index()
s = s.to_sparse() # TODO: specify kind?
if dense_index:
# is there a better constructor method to use here?
i = range(A.shape[0])
j = range(A.shape[1])
ind = MultiIndex.from_product([i, j])
s = s.reindex_axis(ind)
return s
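# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Round-trips a small MultiIndexed SparseSeries through scipy.sparse with the
# private helpers above. Assumes a pandas version that still ships SparseSeries
# (these helpers back SparseSeries.to_coo / SparseSeries.from_coo).
if __name__ == "__main__":
    import numpy as np

    dense = Series([3.0, np.nan, 1.0, 2.0],
                   index=MultiIndex.from_tuples([('a', 0), ('a', 1),
                                                 ('b', 0), ('b', 1)],
                                                names=['row', 'col']))
    ss = dense.to_sparse()
    A, rows, columns = _sparse_series_to_coo(ss, row_levels=['row'],
                                             column_levels=['col'],
                                             sort_labels=True)
    assert A.shape == (len(rows), len(columns)) == (2, 2)
    ss_back = _coo_to_sparse_series(A, dense_index=True)
    assert len(ss_back) == 4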
|
bsd-3-clause
|
rain1024/sklearn_tutorial
|
doc/skeletons/exercise_02.py
|
4
|
6244
|
"""
Astronomy Tutorial: exercise 2
Photometric redshift determination
usage: python exercise_02.py datadir
- datadir is $TUTORIAL_DIR/data/sdss_photoz
This directory should contain the files:
- sdss_photoz.npy
Here we will take a closer look at the photometric redshift problem discussed
in section 5 of the tutorial. Using the decision tree classifier, we'll take
a look at the 4-color observations of just over 400,000 points.
The point of this exercise is to answer the question: how can we get the rms
error down to below 0.1? Would it be a better use of telescope time to
observe more objects, or to observe additional features of the objects
in the data set? We'll use the techniques discussed in section 3 of the
tutorial.
"""
import os, sys
import numpy as np
import pylab as pl
from sklearn.tree import DecisionTreeRegressor
from sklearn import metrics
try:
datadir = sys.argv[1]
except IndexError:
print __doc__
sys.exit()
def compute_rms_error(y_pred, y_true):
"""Compute the rms error between the arrays y_pred and y_true"""
return np.sqrt(metrics.mean_squared_error(y_pred, y_true))
def compute_outlier_fraction(y_pred, y_true, cutoff=0.2):
"""Compute the outlier rate between the arrays y_pred and y_true"""
return np.sum((abs(y_pred - y_true) > cutoff)) * 1. / len(y_pred)
#------------------------------------------------------------
# load data and compute colors
data = np.load(os.path.join(datadir, 'sdss_photoz.npy'))
# here we'll truncate the data to 50,000 points. This will allow the code
# below to be run quickly while it's being written. When you're satisfied
# that the code is ready to go, you can comment out this line.
data = data[:50000]
print '%i points' % data.shape[0]
u, g, r, i, z = [data[f] for f in 'ugriz']
X = np.zeros((len(data), 4))
X[:, 0] = u - g
X[:, 1] = g - r
X[:, 2] = r - i
X[:, 3] = i - z
y = data['redshift']
#------------------------------------------------------------
# divide into training, cross-validation, and test samples
Ntot = len(y)
Ncv = Ntot / 5
Ntest = Ntot / 5
Ntrain = Ntot - Ncv - Ntest
X_train = X[:Ntrain]
y_train = y[:Ntrain]
X_cv = X[Ntrain:Ntrain + Ncv]
y_cv = y[Ntrain:Ntrain + Ncv]
X_test = X[Ntrain + Ncv:]
y_test = y[Ntrain + Ncv:]
#------------------------------------------------------------
# plot the Decision Tree error as a function of max_depth parameter
#
# This is the first main part of the exercise. This is photometric
# redshift determination using DecisionTreeRegressor. Here you'll plot
# the training error and cross-validation error as a function of the
# meta-parameter 'max_depth'.
#
# You will create three arrays: max_depth_array, train_error, and cv_error.
# Use at least 10 different values of max_depth, and compute the training
# and cross-validation error associated with each of them.
#
# note that the error can be computed with the function compute_rms_error()
max_depth_array = []
train_error = []
cv_error = []
# TODO: compute the arrays max_depth_array, train_error, and cv_error
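# One possible way to fill in the TODO above (editor's sketch, not the official
# solution): loop over candidate depths, fit on the training set, and record
# the rms error on both the training and cross-validation sets.
for max_depth_i in range(1, 21):
    model = DecisionTreeRegressor(max_depth=max_depth_i)
    model.fit(X_train, y_train)
    max_depth_array.append(max_depth_i)
    train_error.append(compute_rms_error(model.predict(X_train), y_train))
    cv_error.append(compute_rms_error(model.predict(X_cv), y_cv))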
pl.figure()
pl.plot(max_depth_array, cv_error, label='cross-val error')
pl.plot(max_depth_array, train_error, label='training error')
pl.legend()
pl.xlabel('max depth')
pl.ylabel('error')
# select the value of max_depth which led to the best results
max_depth = max_depth_array[np.argmin(cv_error)]
print "max_depth = %i" % max_depth
#------------------------------------------------------------
# plot the Decision Tree error as a function of number of samples
#
# This is the second main part of the exercise. Here you'll plot the
# training error and cross-validation error as a function of the
# number of training samples.
#
# You will create three arrays: n_samples_array, train_error, and cv_error.
# Use at least 40 different values of n_samples, and compute the training
# and cross-validation error associated with each of them.
#
# Make sure that when computing the training error for each number of
# samples, you use the same samples that the model was trained on.
n_samples_array = []
train_error = []
cv_error = []
# TODO: compute the arrays n_samples_array, train_error, and cv_error
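# One possible way to fill in the TODO above (editor's sketch): train on nested
# subsets of the training data at the max_depth selected above, computing the
# training error on the same samples each model was fit on.
for n_samples in np.linspace(100, Ntrain, 40).astype(int):
    model = DecisionTreeRegressor(max_depth=max_depth)
    model.fit(X_train[:n_samples], y_train[:n_samples])
    n_samples_array.append(n_samples)
    train_error.append(compute_rms_error(model.predict(X_train[:n_samples]),
                                         y_train[:n_samples]))
    cv_error.append(compute_rms_error(model.predict(X_cv), y_cv))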
pl.figure()
pl.plot(n_samples_array, cv_error, label='cross-val error')
pl.plot(n_samples_array, train_error, label='training error')
pl.legend()
pl.xlabel('number of samples')
pl.ylabel('error')
#----------------------------------------------------------------------
# Use the whole dataset:
# If you have been running your code on only a part of the dataset,
# now that you have it working, you can run it on the full dataset
# (note: this will take a long time to execute!) You can do this by
# commenting out the line
# data = data[:50000]
# above. How does this change the results?
#------------------------------------------------------------
# Catastrophic Outliers
# Though the rms error is one useful measure of the performance of an
# algorithm, astronomers are often more interested in reducing the
# 'catastrophic outlier' rate. Catastrophic outliers are points which
# are given redshifts very far from the true value. For accuracy of
# cosmological results, this is often more important than the overall
# rms error.
#
# Here, you can re-implement the above tasks, plotting the catastrophic
# outlier rate as a function of the max_depth parameter, and as a function
# of the number of training points. This can be accomplished either by
# copying and pasting the above code here, or by modifying the above code.
#
# To compute the catastrophic error rate, you can use the function
# compute_outlier_fraction()
# TODO: repeat the above two plots using catastrophic error rate
#----------------------------------------------------------------------
# Analyze the results
#
# Compare your results to the discussion of bias and variance in section
# 3. How do you think these results could be improved? Is it better to
# spend telescope time increasing the size of the training set, or would
# it be better to measure more features of the objects we already have?
# Does this recommendation change if the astronomer is interested in
# minimizing the number of catastrophic outliers rather than the rms error?
pl.show()
|
bsd-3-clause
|
HolgerPeters/scikit-learn
|
sklearn/metrics/tests/test_pairwise.py
|
42
|
27323
|
import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses scikit-learn metric, cityblock (function) is
# scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# The string "cosine" uses sklearn.metric,
# while the function cosine is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
res[np.isnan(res)] = 0
assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
        # even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1.]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean scikit-learn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
def test_cosine_distances():
# Check the pairwise Cosine distances computation
rng = np.random.RandomState(1337)
x = np.abs(rng.rand(910))
XA = np.vstack([x, x])
D = cosine_distances(XA)
assert_array_almost_equal(D, [[0., 0.], [0., 0.]])
# check that all elements are in [0, 2]
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0., 0.])
XB = np.vstack([x, -x])
D2 = cosine_distances(XB)
# check that all elements are in [0, 2]
assert_true(np.all(D2 >= 0.))
assert_true(np.all(D2 <= 2.))
# check that diagonal elements are equal to 0 and non diagonal to 2
assert_array_almost_equal(D2, [[0., 2.], [2., 0.]])
# check large random matrix
X = np.abs(rng.rand(1000, 5000))
D = cosine_distances(X)
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0.] * D.shape[0])
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
    # The modified tests are not 1D. In the old test, the array was internally
    # converted to 2D anyway
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
|
bsd-3-clause
|
sinhrks/scikit-learn
|
sklearn/datasets/__init__.py
|
72
|
3807
|
"""
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_breast_cancer
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
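# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# Illustrates the two flavours of this package: bundled loaders and synthetic
# data generators. The block only runs when the package is executed directly.
if __name__ == "__main__":
    iris = load_iris()
    X_blobs, y_blobs = make_blobs(n_samples=100, centers=3, random_state=0)
    print(iris.data.shape)
    print(X_blobs.shape)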
|
bsd-3-clause
|