repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
cl4rke/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
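# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the upstream module). It assumes the
# MLComp "20news-18828" document-classification dataset has already been
# downloaded and unzipped under MLCOMP_DATASETS_HOME; the dataset name and
# the example path are assumptions.
if __name__ == '__main__':
    os.environ.setdefault('MLCOMP_DATASETS_HOME', os.path.expanduser('~/mlcomp'))
    news_train = load_mlcomp('20news-18828', set_='train')
    print(news_train.target_names)     # label names parsed from the metadata file
    print(len(news_train.filenames))   # one raw document file per sample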
| bsd-3-clause |
lhillber/qops | spectrum_statistics.py | 1 | 5109 | import numpy as np
from numpy.linalg import eigvalsh, eigh, matrix_power
import matplotlib.pyplot as plt
from scipy.special import gamma
from scipy.optimize import curve_fit
from qca import multipage
from figure3 import select, moving_average
# plotting defaults
import matplotlib as mpl
mpl.rcParams["text.latex.preamble"] = [r"\usepackage{amsmath}"]
font = {"size": 9, "weight": "normal"}
mpl.rcParams["mathtext.fontset"] = "stix"
mpl.rcParams["font.family"] = "STIXGeneral"
mpl.rcParams["pdf.fonttype"] = 42
mpl.rc("font", **font)
def brody_fit(x, n):
def brody_func(x, eta, A):
b = (gamma((eta + 2) / (eta + 1))) ** (eta + 1.0)
return A * b * (eta + 1.0) * x ** eta * np.exp(-b * x ** (eta + 1.0))
popt, pcov = curve_fit(brody_func, x, n, p0=[0.0, 1.0], bounds=[0, 1])
def func(x):
return brody_func(x, *popt)
return func, popt, pcov
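# Editor's sketch (not in the original script): a quick self-check of brody_fit
# on level spacings drawn from an exponential (Poisson) distribution, where the
# fitted Brody parameter eta should come out near 0; Wigner-Dyson statistics
# would instead give eta near 1. Sample size and binning are arbitrary choices.
def _brody_fit_check(seed=0):
    rng = np.random.default_rng(seed)
    s = rng.exponential(size=2000)
    s /= s.mean()                      # normalize to unit mean spacing
    dens, edges = np.histogram(s, bins=20, density=True)
    centers = 0.5 * (edges[1:] + edges[:-1])
    _, popt, _ = brody_fit(centers, dens)
    return popt[0]                     # fitted eta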
from measures import renyi_entropy
L = 18
IC = "c1_f0"
Skey = [13, 14, 1, 6]
cs = ["darkturquoise", "darkorange", "limegreen", "crimson"]
for j, (c, S) in enumerate(zip(cs, Skey)):
sim = select(L, S, IC, V="H", BC="0")
h5file = sim["h5file"]
d = h5file["cut_half"][:]
for ti, rho in enumerate(d[100:101]):
spec, vecs = eigh(rho)
fig, axs = plt.subplots(1, 2, figsize=(6, 3))
print(renyi_entropy(rho))
axs[0].set_title("spectrum")
axs[0].semilogy(spec, color=c, marker="o")
axs[1].set_title("column vector magnitude")
axs[1].imshow(np.abs(vecs), cmap="gist_gray_r")
fig.suptitle("$T_{%d}$"%S)
multipage("figures/figure4/eigenRDM.pdf")
plt.close("all")
print("done")
if __name__ == "__main__":
L = 18
IC = "c1_f0"
Skey = [13, 14, 1, 6]
cs = ["darkturquoise", "darkorange", "limegreen", "crimson"]
fig, axs = plt.subplots(1, 1, figsize=(4, 3), sharex=False)
fig2, axs2 = plt.subplots(1, 1, figsize=(3, 2), sharex=True)
fig2.subplots_adjust(left=0.2, bottom=0.2, hspace=0.1)
fig3s = []
for j, (c, S) in enumerate(zip(cs, Skey)):
sim = select(L, S, IC, V="H", BC="0")
h5file = sim["h5file"]
try:
espec = h5file["espec"]
except KeyError:
d = h5file["cut_half"]
espec = np.zeros((d.shape[0], d.shape[1]))
for ti, rho in enumerate(d):
espec[ti, :] = eigvalsh(rho)
h5file["espec"] = espec
etas = []
detas = []
svns = []
ii = 0
t0 = 10
fig3, axs3 = plt.subplots(3, 3, figsize=(4, 4), sharex=True, sharey=True)
for ti, es in enumerate(espec[t0:1000]):
es = es[es > 1e-6]
NN = len(es)
es = np.sort(es)
es = es[NN // 3 : 2 * NN // 3]
ns = range(len(es))
s = es[1:] - es[:-1]
s /= np.mean(s)
n, bin, _ = axs.hist(
s, density=True, alpha=1, histtype="step", bins=10, log=False
)
x = (bin[1:] + bin[:-1]) / 2.0
xs = np.linspace(x[0], x[-1], 100)
xs = xs[xs > 0]
func, popt, pcov = brody_fit(x, n)
detas.append(np.sqrt(np.diag(pcov)[0]))
etas.append(popt[0])
if (ti+t0) % 100 == 0:
row, col = ii // 3, ii % 3
ax3 = axs3[row, col]
dx = x[1] - x[0]
n = np.insert(n, 0, 0)
n = np.insert(n, len(n), 0)
x = np.insert(x, 0, x[0] - dx / 2)
x = np.insert(x, len(x), x[-1] + dx / 2)
ax3.step(x, n, where="mid")
ax3.plot(xs, func(xs))
ax3.set_title(f"t={t0+ti}", pad=-13)
fig3.suptitle("$R = %d$" % S)
ii += 1
if col == 1 and row == 2:
ax3.set_xlabel("$\delta E/\overline{\delta E}$")
if col == 0 and row == 1:
ax3.set_ylabel("density")
ax3.tick_params(direction="inout")
fig3.subplots_adjust(hspace=0, wspace=0)
fig3s.append(fig3)
etas = np.array(etas)
detas = np.array(detas)
ts = np.arange(2, len(etas) + 2)
mask = detas < 1
etas = etas[mask]
detas = detas[mask]
ts = ts[mask]
if S == 6:
pass
else:
if S == 13:
label = r"$R = %s$" % S
else:
label = str(S)
aetas = moving_average(etas, n=L)
axs2.plot(aetas, marker=None, color=c, label=label, lw=1)
avgerr = np.mean(detas)
axs2.plot(ts, etas, c=c, alpha=0.3)
#axs2.errorbar(ts, etas, yerr=detas, color=c)
axs2.set_xticks([0, 250, 500, 750, 1000])
axs2.legend(loc="lower right")
axs.set_xlabel("$\delta E / \overline{\delta E}$")
axs2.set_xlabel("$t$")
axs2.set_ylabel("$\eta$")
fig.tight_layout()
fig2.tight_layout()
multipage(
"figures/figure4/spectrum_statistics_fixed-10bins.pdf",
figs=[fig2] + fig3s,
clip=True,
dpi=10 * fig.dpi,
)
| mit |
NelisVerhoef/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
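# Editor's sketch (addition, not in the original example): print how many support
# vectors each kernel keeps per class on this toy dataset, one concrete way to
# compare the three decision boundaries plotted above.
for kernel in ('linear', 'poly', 'rbf'):
    clf = svm.SVC(kernel=kernel, gamma=2).fit(X, Y)
    print(kernel, 'support vectors per class:', clf.n_support_)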
| bsd-3-clause |
dnidever/noaosourcecatalog | python/nsc_instcal_combine_qacuts.py | 1 | 31160 | #!/usr/bin/env python
import os
import sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table, vstack, Column
from astropy.time import Time
import healpy as hp
from dlnpyutils import utils as dln, coords
import subprocess
import shutil
import time
from argparse import ArgumentParser
import socket
#from dustmaps.sfd import SFDQuery
from astropy.coordinates import SkyCoord
#from sklearn.cluster import DBSCAN
#from scipy.optimize import least_squares
#from scipy.interpolate import interp1d
# Combine data for one NSC healpix region
if __name__ == "__main__":
parser = ArgumentParser(description='Combine NSC Instcal Catalogs.')
parser.add_argument('version', type=str, nargs=1, help='Version number')
parser.add_argument('--makelist', action='store_true', help='Make healpix list')
parser.add_argument('-r','--redo', action='store_true', help='Redo this HEALPIX')
parser.add_argument('--nmulti', type=int, default=20, help='Number of jobs to run')
parser.add_argument('--nocuts', action='store_true', help='Do not apply any quality cuts')
args = parser.parse_args()
t0 = time.time()
hostname = socket.gethostname()
host = hostname.split('.')[0]
# Inputs
version = args.version
redo = args.redo
makelist = args.makelist
nmulti = args.nmulti
nocuts = args.nocuts
nside = 128
radeg = 180 / np.pi
# on thing/hulk use
if (host == "thing") or (host == "hulk"):
basedir = "/net/dl1/users/dnidever/nsc/instcal/"+version+"/"
mssdir = "/mss1/"
localdir = "/d0/"
tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/"
# on gp09 use
if (host == "gp09") or (host == "gp08") or (host == "gp07") or (host == "gp06") or (host == "gp05"):
basedir = "/net/dl1/users/dnidever/nsc/instcal/"+version+"/"
mssdir = "/net/mss1/"
localdir = "/data0/"
tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/"
t0 = time.time()
# Combine all of the data
if not os.path.exists(basedir+'combine'): os.mkdir(basedir+'combine/')
if not os.path.exists(basedir+'combine/logs/'): os.mkdir(basedir+'combine/logs/')
if not os.path.exists(localdir+'dnidever/nsc/instcal/'+version+'/'): os.mkdir(localdir+'dnidever/nsc/instcal/'+version+'/')
plotsdir = basedir+'plots/'
if not os.path.exists(plotsdir): os.mkdir(plotsdir)
# Log file
#------------------
# format is nsc_combine_main.DATETIME.log
ltime = time.localtime()
# time.struct_time(tm_year=2019, tm_mon=7, tm_mday=22, tm_hour=0, tm_min=30, tm_sec=20, tm_wday=0, tm_yday=203, tm_isdst=1)
smonth = str(ltime[1])
if ltime[1]<10: smonth = '0'+smonth
sday = str(ltime[2])
if ltime[2]<10: sday = '0'+sday
syear = str(ltime[0])[2:]
shour = str(ltime[3])
if ltime[3]<10: shour='0'+shour
sminute = str(ltime[4])
if ltime[4]<10: sminute='0'+sminute
ssecond = str(int(ltime[5]))
if ltime[5]<10: ssecond='0'+ssecond
logtime = smonth+sday+syear+shour+sminute+ssecond
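# Editor's note (sketch): the block above reproduces IDL-style zero padding by
# hand; the same timestamp string can be built in a single call, e.g.
#   logtime = time.strftime('%m%d%y%H%M%S', ltime)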
logfile = basedir+'combine/logs/nsc_instcal_combine_main.'+logtime+'.log'
#JOURNAL,logfile
print("Combining NOAO InstCal catalogs")
#goto,STARTRUNNING
# Restore the calibration summary file
temp = fits.getdata(basedir+'lists/nsc_instcal_calibrate.fits',1)
schema = dict(temp.dtype.fields)
schema['chipindx'] = (int,0)
schema['ngoodchipwcs'] = (int,0)
schema['wcscal'] = (np.str,50)
schema['telstat'] = (np.str,50)
dt = np.dtype(schema)
calstr = np.zeros(len(temp),dtype=dt)
calstr['chipindx'] = -1
for n in temp.dtype.names: calstr[n]=temp[n]
# Add WCSCAL and TELSTAT information
allcoords = fits.getdata(basedir+'lists/allcoords.fits',1)  # renamed so it does not shadow the dlnpyutils coords module
fluxfile = calstr['file']
fluxfile = np.char.replace(fluxfile,'/net','')
ind1,ind2 = dln.match(fluxfile,allcoords['file'])
calstr['wcscal'][ind1] = allcoords['wcscal'][ind2] # Failed (3153), Poor (14), Successful (308190)
calstr['telstat'][ind1] = allcoords['telstat'][ind2] # NAN (68188), Not (1222), Track (241826), UNKNOWN (116), Unknown (5)
# the 2054 failed exposures did not match b/c no fluxfile info
# Only want exposures with successful SE processing
gd,ncalstr = dln.where(calstr['success']==1)
calstr = calstr[gd]
si = np.argsort(calstr['expdir'])
calstr = calstr[si]
chstr = fits.getdata(basedir+'lists/nsc_instcal_calibrate.fits',2)
nchstr = len(chstr)
# Get indices for CHSTR
siexp = np.argsort(chstr['expdir'])
chstr = chstr[siexp]
expdir = chstr['expdir']
brklo,nbrk = dln.where(expdir != np.roll(expdir,1))
brkhi = np.append(brklo[1:nbrk]-1,len(expdir)-1)
nchexp = brkhi-brklo+1
if ncalstr != len(brklo):
    raise Exception('number of exposures in CALSTR and CHSTR do not match')
calstr['chipindx'] = brklo
calstr['nchips'] = nchexp
# Getting number of good chip WCS for each exposures
for i in range(len(calstr)): calstr['ngoodchipwcs'][i] = np.sum(chstr['ngaiamatch'][brklo[i]:brkhi[i]+1]>0)
# Fixing absolute paths of flux filename
cfile = calstr['file']
cfile = np.char.replace(cfile,'/net/mss1/','')
cfile = np.char.replace(cfile,'/mss1/','')
# Fixing very negative RAs
print('FIXING NEGATIVE RAs in CALSTR and CHSTR')
#bdra, = np.where(chstr.cenra lt -180,nbdra)
bdra,nbdra = dln.where(chstr['cenra']<0)
dum,uibd = np.unique(chstr['expdir'][bdra],return_index=True)
ind1,ind2 = dln.match(calstr['expdir'],chstr['expdir'][bdra[uibd]])
nmatch = len(ind1)
for i in range(nmatch):
ind3,ind4 = dln.match(chstr['expdir'][bdra],calstr['expdir'][ind1[i]])
# Fix CALSTR RA
chra = chstr['cenra'][bdra[ind3]]
bd1,nbd1 = dln.where(chra < -180)
if nbd1>0: chra[bd1]+=360
cenra = np.mean(dln.minmax(chra))
if cenra<0: cenra+=360
calstr['ra'][ind1[i]] = cenra
# Fix CHSTR CENRA
bd2,nbd2 = dln.where(chra<0)
if nbd2>0: chra[bd2]+=360
chstr['cenra'][bdra[ind3]] = chra
# Fix CHSTR VRA
vra = chstr['vra'][bdra[ind3]]
bd3,nbd3 = dln.where(vra<0)
if nbd3>0: vra[bd3]+=360
chstr['vra'][bdra[ind3]] = vra
# Fix instrument in STR and CHSTR
print('FIXING INSTRUMENT IN STR AND CHSTR')
for instcode in ['c4d','k4m','ksb']:
    gd,ngd = dln.where(np.array(['/'+instcode+'/' in str(e) for e in calstr['expdir']]))
    if ngd>0: calstr['instrument'][gd] = instcode
    gd,ngd = dln.where(np.array(['/'+instcode+'/' in str(e) for e in chstr['expdir']]))
    if ngd>0: chstr['instrument'][gd] = instcode
## Fix missing AIRMASS
#bdam, = np.where(str.airmass lt 0.9,nbdam)
#for i=0,nbdam-1 do begin
# type = ['c4d','k4m','ksb']
# obs = ['ctio','kpno','kpno']
# MATCH,str[bdam[i]].instrument,type,ind1,ind2,/sort
# obsname = obs[ind2]
# OBSERVATORY,obsname,obstr
# lat = obstr.latitude
# lon = obstr.longitude
# jd = date2jd(str[bdam[i]].dateobs)
# ra = str[bdam[i]].ra
# dec = str[bdam[i]].dec
# str[bdam[i]].airmass = AIRMASS(jd,ra,dec,lat,lon)
#endfor
# THIS IS STILL RETURNING -1, IS ONE OF THE VALUES WRONG??
# APPLY RELEASE-DATE CUTS
list1 = fits.getdata(basedir+'lists/decam_instcal_list.fits',1)
list2 = fits.getdata(basedir+'lists/mosaic3_instcal_list.fits',1)
list3 = fits.getdata(basedir+'lists/bok90prime_instcal_list.fits',1)
elist = np.hstack((list1,list2,list3))
fluxfile = [f[10:] for f in elist['fluxfile']]
ind1,ind2 = dln.match(fluxfile,cfile)
# some don't match because they were from a previous version
# of the input list
release_date = np.zeros(len(calstr),dtype=(np.str,100))+'2020-01-01 00:00:00'
release_date[ind2] = elist['release_date'][ind1]
release_date = np.char.replace(np.char.strip(release_date),' ','T')
trelease = Time(release_date, format='isot', scale='utc')
#release_cutoff = [2017,4,24] # v1 - April 24, 2017
#release_cutoff = [2017,10,11] # v2 - Oct 11, 2017
release_cutoff = [2019,7,9] # v3 - July 9, 2019
release_date_cutoff = ('%04d-%02d-%02d' % (release_cutoff[0],release_cutoff[1],release_cutoff[2]))+'T00:00:00'
tcutoff = Time(release_date_cutoff, format='isot', scale='utc')
gdrelease,ngdrelease,bdrelease,nbdrelease = dln.where(trelease.mjd <= tcutoff.mjd,comp=True)
print(str(ngdrelease)+' exposures are PUBLIC')
calstr = calstr[gdrelease] # impose the public data cut
# Zero-point structure
dt_zpstr = np.dtype([('instrument',np.str,10),('filter',np.str,10),('amcoef',float,2),('thresh',float)])
zpstr = np.zeros(10,dtype=dt_zpstr)
zpstr['thresh'] = 0.5
zpstr['instrument'][0:7] = 'c4d'
zpstr['filter'][0:7] = ['u','g','r','i','z','Y','VR']
zpstr['amcoef'][0] = [-1.60273, -0.375253] # c4d-u
zpstr['amcoef'][1] = [0.277124, -0.198037] # c4d-g
zpstr['amcoef'][2] = [0.516382, -0.115443] # c4d-r
zpstr['amcoef'][3] = [0.380338, -0.067439] # c4d-i
zpstr['amcoef'][4] = [0.123924, -0.096877] # c4d-z
zpstr['amcoef'][5] = [-1.06529, -0.051967] # c4d-Y
zpstr['amcoef'][6] = [1.004357, -0.081105] # c4d-VR
# Mosiac3 z-band
zpstr['instrument'][7] = 'k4m'
zpstr['filter'][7] = 'z'
zpstr['amcoef'][7] = [-2.687201, -0.73573] # k4m-z
# Bok 90Prime, g and r
zpstr['instrument'][8] = 'ksb'
zpstr['filter'][8] = 'g'
zpstr['amcoef'][8] = [-2.859646, -1.40837] # ksb-g
zpstr['instrument'][9] = 'ksb'
zpstr['filter'][9] = 'r'
zpstr['amcoef'][9] = [-4.008771, -0.25718] # ksb-r
nzpstr = len(zpstr)
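# Editor's sketch (assumption, not in the original script): how the amcoef pairs
# above are meant to be used; this mirrors the relzpterm correction applied in
# the QA loop below, removing the linear airmass slope referenced to airmass=1.
def _airmass_detrend(zpterm, airmass, amcoef):
    """Return zero-point terms with the linear airmass dependence removed."""
    return zpterm - amcoef[1] * (airmass - 1.0)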
#STOP,'DOUBLE-CHECK THESE ZERO-POINTS!!!'
# APPLY QA CUTS IN ZEROPOINT AND SEEING
if not nocuts:
print('APPLYING QA CUTS')
#fwhmthresh = 3.0 # arcsec, v1
fwhmthresh = 2.0 # arcsec, v2
#filters = ['u','g','r','i','z','Y','VR']
#nfilters = len(filters)
#zpthresh = [2.0,2.0,2.0,2.0,2.0,2.0,2.0]
#zpthresh = [0.5,0.5,0.5,0.5,0.5,0.5,0.5]
badzpmask = np.zeros(len(calstr),bool)+True
for i in range(nzpstr):
ind,nind = dln.where((calstr['instrument']==zpstr['instrument'][i]) & (calstr['filter']==zpstr['filter'][i]) & (calstr['success']==1))
print(zpstr['instrument'][i]+'-'+zpstr['filter'][i]+' '+str(nind)+' exposures')
if nind>0:
calstr1 = calstr[ind]
zpterm = calstr1['zpterm']
bdzp,nbdzp = dln.where(~np.isfinite(zpterm)) # fix Infinity/NAN
if nbdzp>0:zpterm[bdzp] = 999999.9
am = calstr1['airmass']
mjd = calstr1['mjd']
bdam,nbdam = dln.where(am < 0.9)
if nbdam>0: am[bdam] = np.median(am)
# I GOT TO HERE IN THE TRANSLATING!!!
glactc,calstr1.ra,calstr1.dec,2000.0,glon,glat,1,/deg
# Measure airmass dependence
gg0,ngg0 = dln.where((np.abs(zpterm)<50) & (am<2.0))
coef0 = dln.poly_fit(am[gg0],zpterm[gg0],1,robust=True)
zpf = dln.poly(am,coef0)
sig0 = dln.mad(zpterm[gg0]-zpf[gg0])
gg,ngg = dln.where(np.abs(zpterm-zpf) < (np.maximum(3.5*sig0,0.2)))
coef = dln.poly_fit(am[gg],zpterm[gg],1,robust=True)
print(zpstr['instrument'][i]+'-'+zpstr['filter'][i]+' '+str(coef))
# Trim out bad exposures to determine the correlations and make figures
gg,ngg = dln.where(np.abs(zpterm-zpf) lt (3.5*sig0 > 0.2) and calstr1.airmass lt 2.0 and calstr1.fwhm lt 2.0 and calstr1.rarms lt 0.15 &
calstr1.decrms lt 0.15 and calstr1.success eq 1 and calstr1.wcscal eq 'Successful' and calstr1.zptermerr lt 0.05 &
calstr1.zptermsig lt 0.08 and (calstr1.ngoodchipwcs eq calstr1.nchips) &
(calstr1.instrument ne 'c4d' or calstr1.zpspatialvar_nccd le 5 or (calstr1.instrument eq 'c4d' and calstr1.zpspatialvar_nccd gt 5 and calstr1.zpspatialvar_rms lt 0.1)) and $
np.abs(glat) gt 10 and calstr1.nrefmatch gt 100 and calstr1.exptime ge 30)
# Zpterm with airmass dependence removed
relzpterm = zpterm + 25 # 25 to get "absolute" zpterm
relzpterm -= (zpstr['amcoef'][i])[1]*(am-1)
# CURRENTLY K4M/KSB HAVE EXPTIME-DEPENDENCE IN THE ZEROPOINTS!!
if (zpstr['instrument'][i]=='k4m') | (zpstr['instrument'][i]=='ksb'):
print('REMOVING EXPTIME-DEPENDENCE IN K4M/KSB ZEROPOINTS!!!')
relzpterm += 2.5*np.log10(calstr1['exptime'])
# Fit temporal variation in zpterm
mjd0 = 56200
xx = calstr1['mjd'][gg]-mjd0
yy = relzpterm[gg]
invvar = 1.0/calstr1['zptermerr'][gg]**2
nord = 3
bkspace = 200
sset1 = bspline_iterfit(xx,yy,invvar=invvar,nord=nord,bkspace=bkspace,yfit=yfit1)
sig1 = mad(yy-yfit1)
gd,ngd = dln.where(yy-yfit1 > -3*sig1)
# refit
sset = bspline_iterfit(xx[gd],yy[gd],invvar=invvar[gd],nord=nord,bkspace=bkspace)
yfit = bspline_valu(xx,sset)
allzpfit = bspline_valu(calstr1.mjd-mjd0,sset)
# Make some figures
# ZPterm vs. airmass
pfile = plotsdir+zpstr[i].instrument+'-'+zpstr[i].filter+'_zpterm_airmass'
ps_open,pfile,/color,thick=4,/encap
hess,am[gg],relzpterm[gg],dx=0.01,dy=0.02,xr=[0.9,2.5],yr=[-0.5,0.5]+median(relzpterm[gg]),xtit='Airmass',ytit='Zero-point',$
tit=zpstr[i].instrument+'-'+zpstr[i].filter
x = scale_vector(findgen(100),0.5,2.0)
oplot,x,poly(x,coef),co=250
ps_close
ps2png,pfile+'.eps',/eps
# ZPterm vs. time (density)
pfile = plotsdir+zpstr[i].instrument+'-'+zpstr[i].filter+'_zpterm_time_density'
ps_open,pfile,/color,thick=4,/encap
hess,calstr1[gg].mjd-mjd0,relzpterm[gg],dx=2,dy=0.02,yr=[-0.5,0.5]+median(relzpterm[gg]),xtit='Time (days)',ytit='Zero-point',$
tit=zpstr[i].instrument+'-'+zpstr[i].filter
oplot,calstr1[gg].mjd-mjd0,allzpfit[gg],ps=1,sym=0.3,co=250
xyouts,50,-0.45+median(relzpterm[gg]),'MJD!d0!n = '+str(mjd0,2),align=0,charsize=1.2
ps_close
ps2png,pfile+'.eps',/eps
# ZPterm vs. time (points)
pfile = plotsdir+zpstr[i].instrument+'-'+zpstr[i].filter+'_zpterm_time'
ps_open,pfile,/color,thick=4,/encap
plot,calstr1[gg].mjd-mjd0,relzpterm[gg],ps=1,sym=0.5,yr=[-0.5,0.5]+median(relzpterm[gg]),xs=1,ys=1,xtit='Time (days)',ytit='Zero-point',$
tit=zpstr[i].instrument+'-'+zpstr[i].filter,thick=1
oplot,calstr1[gg].mjd-mjd0,allzpfit[gg],ps=1,sym=0.3,co=250
xyouts,50,-0.45+median(relzpterm[gg]),'MJD!d0!n = '+str(mjd0,2),align=0,charsize=1.2
ps_close
ps2png,pfile+'.eps',/eps
# Remove temporal variations to get residual values
relzpterm -= allzpfit
# Find the GOOD exposures
#------------------------
# We are using ADDITIVE zpterm
# calmag = instmag + zpterm
# if there are clouds then instmag is larger/fainter
# and zpterm is smaller (more negative)
#bdind, = np.where(calstr[ind].zpterm-medzp lt -zpthresh[i],nbdind)
gdmask = (relzpterm >= -zpstr['thresh'][i]) & (relzpterm <= zpstr['thresh'][i])
gdind,ngdind,bdind,nbdind = dln.where(gdmask,comp=True)
print(' '+str(nbdind)+' exposures with ZPTERM below the threshold')
if ngdind>0: badzpmask[ind[gdind]] = 0
# Get bad DECaLS and SMASH exposures
badexp = np.zeros(len(calstr),bool)
READCOL,'/home/dnidever/projects/noaosourcecatalog/obslog/smash_badexposures.txt',smashexpnum,format='A',comment='#',/silent
MATCH,int(calstr.expnum),int(smashexpnum),ind1,ind2,/sort,count=nmatch
if nmatch>0:
badexp[ind1] = 1
badexp[ind1] = badexp[ind1] & (calstr['instrument'][ind1]=='c4d') # make sure they are DECam exposures
READCOL,'/home/dnidever/projects/noaosourcecatalog/obslog/decals_bad_expid.txt',decalsexpnum,format='A',comment='#',/silent
MATCH,int(calstr.expnum),int(decalsexpnum),ind1,ind2,/sort,count=nmatch
if nmatch>0:
badexp[ind1] = 1
badexp[ind1] = badexp[ind1] & (calstr['instrument'][ind1]=='c4d') # make sure they are DECam exposures
READCOL,'/home/dnidever/projects/noaosourcecatalog/obslog/mzls_bad_expid.txt',mzlsexpnum,format='A',comment='#',/silent
MATCH,int(calstr.expnum),int(mzlsexpnum),ind1,ind2,/sort,count=nmatch
if nmatch>0:
badexp[ind1] = 1
badexp[ind1] = badexp[ind1] & (calstr['instrument'][ind1]=='k4m') # make sure they are Mosaic3 exposures
# Final QA cuts
# Many of the short u-band exposures have weird ZPTERMs, not sure why
# There are a few exposures with BAD WCS, RA>360!
bdexp,nbdexp = dln.where((calstr['success']==0) | # SE failure
(calstr['wcscal']!='Successful') | # CP WCS failure
(calstr['fwhm']>fwhmthresh) | # bad seeing
(calstr['ra']>360) | # bad WCS/coords
(calstr['rarms']>0.15) | (calstr['decrms']>0.15) | # bad WCS
(badzpmask==1) | # bad ZPTERM
(calstr['zptermerr']>0.05) | # bad ZPTERMERR
(calstr['nrefmatch']<5) | # few phot ref match
(badexp==1) | # bad SMASH/LS exposure
#(calstr['ngoodchipwcs']<calstr['nchips'] | # not all chips astrom calibrated
((calstr['instrument']=='c4d') & (calstr['zpspatialvar_nccd']>5) & (calstr['zpspatialvar_rms']>0.1)))) # bad spatial zpterm
# rarms/decrms, nrefmatch
print('QA cuts remove '+str(nbdexp)+' exposures')
# Remove
torem = np.zeros(nchstr,bool)
for i in range(nbdexp): torem[calstr['chipindx'][bdexp[i]]:calstr['chipindx'][bdexp[i]]+calstr['nchips'][bdexp[i]]] = 1
bdchstr,nbdchstr = dln.where(torem==1)
chstr = np.delete(chstr,bdchstr)
calstr = np.delete(calstr,bdexp)
# Get new CHIPINDEX values
# make two arrays of old and new indices to transfer
# the new index values into an array with the size of
# the old CHSTR
trimoldindex = np.delete(np.arange(nchstr),bdchstr) # index into original array, but "bad" ones removed/trimmed
trimnewindex = np.arange(len(trimoldindex)) # new index of trimmed array
newindex = np.zeros(nchstr,int)-1
newindex[trimoldindex] = trimnewindex # new index in original array
calstr['chipindx'] = newindex[calstr['chipindx']]
ncalstr = len(calstr)
# SHOULD INCLUDE CUTS ON ZTERMERR OR NPHOTMATCH
#STOP,'SHOULD INCLUDE CUTS ON ZTERMERR OR NPHOTMATCH'
#STARTRUNNING:
# CREATE LIST OF HEALPIX AND OVERLAPPING EXPOSURES
# Which healpix pixels have data
listfile = basedir+'lists/nsc_instcal_combine_healpix_list.fits'
if makelist or not os.path.exists(listfile):
print('Finding the Healpix pixels with data')
radius = 1.1
dtype_healstr = np.dtype([('file',np.str,200),('base',np.str,200),('pix',int)])
healstr = np.zeros(100000,dtype=dtype_healstr)
nhealstr = len(healstr)
cnt = 0
for i in range(ncalstr):
if i % 1e3 == 0: print(str(i))
theta = (90-calstr['dec'][i])/radeg
phi = calstr['ra'][i]/radeg
vec = hp.ang2vec(theta,phi)
listpix = hp.query_disc(nside,vec,np.radians(radius),inclusive=True)
nlistpix = len(listpix)
# Use the chip corners to figure out which ones actually overlap
chstr1 = chstr[calstr['chipindx'][i]:calstr['chipindx'][i]+calstr['nchips'][i]]
# rotate to tangent plane so it can handle RA=0/360 and poles properly
ROTSPHCEN,chstr1.vra,chstr1.vdec,calstr[i].ra,calstr[i].dec,vlon,vlat,/gnomic
# loop over healpix
overlap = np.zeros(nlistpix,bool)
for j in range(nlistpix):
PIX2VEC_RING,nside,listpix[j],vec,vertex
vertex = transpose(reform(vertex)) # [1,3,4] -> [4,3]
VEC2ANG,vertex,hdec,hra,/astro
ROTSPHCEN,hra,hdec,calstr[i].ra,calstr[i].dec,hlon,hlat,/gnomic
# loop over chips
for k in range(calstr['nchips'][i]):
overlap[j] = overlap[j] or coords.doPolygonsOverlap(hlon,hlat,vlon[:,k],vlat[:,k])
# Only keep the healpix with real overlaps
gdlistpix,ngdlistpix = dln.where(overlap==1)
if ngdlistpix>0:
listpix = listpix[gdlistpix]
nlistpix = ngdlistpix
else:
del(listpix)
nlistpix = 0
if nlistpix==0:
raise Exception('No healpix for this exposure. Something is wrong!')
# Add new elements to array
if (cnt+nlistpix)>nhealstr:
old = healstr
healstr = np.zeros(nhealstr+10000,dtype=dtype_healstr)
healstr[0:nhealstr] = old
nhealstr += 10000
del(old)
# Add to the structure
healstr['file'][cnt:cnt+nlistpix] = calstr['expdir'][i]+'/'+calstr['base'][i]+'_cat.fits'
healstr['base'][cnt:cnt+nlistpix] = calstr['base'][i]
healstr['pix'][cnt:cnt+nlistpix] = listpix
cnt += nlistpix
# Trim extra elements
healstr = healstr[0:cnt]
nhealstr = len(healstr)
# Get uniq pixels
upix = np.unique(healstr['pix'])
nupix = len(upix)
print(str(nupix)+' Healpix pixels have overlapping data')
# Get start/stop indices for each pixel
idx = np.argsort(healstr['pix'])
healstr = healstr[idx]
q = healstr['pix']
lo,nlo = dln.where(q != np.roll(q,1))
#hi, = np.where(q ne shift(q,-1))
hi = np.append(lo[1:nlo]-1,nhealstr-1)
nexp = hi-lo+1
dtype_index = np.dtype([('pix',int),('lo',int),('hi',int),('nexp',int)])
index = np.zeros(nupix,dtype=dtype_index)
index['pix'] = upix
index['lo'] = lo
index['hi'] = hi
index['nexp'] = nexp
npix = len(index)
# Replace /net/dl1/ with /dl1/ so it will work on all machines
healstr['file'] = np.char.replace(healstr['file'],'/net/dl1/','/dl1/')
# Write the full list plus an index
print('Writing list to '+listfile)
Table(healstr).write(listfile)
# append other fits binary tables
hdulist = fits.open(listfile)
hdu = fits.table_to_hdu(Table(index)) # second extension: the healpix index table
hdulist.append(hdu)
hdulist.writeto(listfile,overwrite=True)
hdulist.close()
if os.path.exists(listfile+'.gz'): os.remove(listfile+'.gz')
ret = subprocess.call(['gzip',listfile]) # compress final catalog
# Copy to local directory for faster reading speed
locallistfile = localdir+'dnidever/nsc/instcal/'+version+'/'+os.path.basename(listfile)+'.gz'
if os.path.exists(locallistfile): os.remove(locallistfile)
shutil.copy(listfile+'.gz',localdir+'dnidever/nsc/instcal/'+version+'/')
# PUT NSIDE IN HEADER!!
# Using existing list
else:
print('Reading list from '+listfile)
healstr = fits.getdata(listfile,1)
index = fits.getdata(listfile,2)
upix = index['pix']
npix = len(index)
# Copy to local directory for faster reading speed
file_copy,listfile,localdir+'dnidever/nsc/instcal/'+version+'/',/over
# Load the list of healpix pixels for this server to be run LOCALLY
pixfile = basedir+'lists/combine_pix_'+host+'.txt'
READLINE,pixfile,pixlist,count=npixlist
rnd = sort(randomu(1,npixlist)) # RANDOMIZE!!
pixlist = int(pixlist[rnd])
print('Running '+str(npixlist)+' jobs on '+host+' with nmult='+str(nmulti))
cmd = "nsc_instcal_combine,"+str(pixlist,2)+",nside="+str(nside,2)+",version='"+version+"',/local,/filesexist"
if keyword_set(redo) then cmd+=',/redo'
cmddir = strarr(npixlist)+localdir+'dnidever/nsc/instcal/'+version+'/tmp/'
# Now run the combination program on each healpix pixel
a = '' & read,a,prompt='Press RETURN to start'
PBS_DAEMON,cmd,cmddir,jobs=jobs,/hyperthread,/idle,prefix='nsccmb',nmulti=nmulti,wait=1
## Make the commands
#cmd = "nsc_instcal_combine,"+str(index.pix,2)+",nside="+str(nside,2)+",version='"+version+"'"
#if keyword_set(redo) then cmd+=',/redo'
#cmddir = strarr(npix)+localdir+'dnidever/nsc/instcal/'+version+'/tmp/'
## Check if the output file exists
#if not keyword_set(redo) then begin
# outfiles = dir+'combine/'+str(upix/1000,2)+'/'+str(upix,2)+'.fits.gz'
# test = file_test(outfiles)
# gd, = np.where(test eq 0,ngd,comp=bd,ncomp=nbd)
# if nbd gt 0 then begin
# print,str(nbd,2),' files already exist and /redo not set.'
# endif
# if ngd eq 0 then begin
# print,'No files to process'
# return
# endif
# print,str(ngd,2),' files left to process'
# cmd = cmd[gd]
# cmddir = cmddir[gd]
#endif
## Prioritize longest-running jobs FIRST
## Use prediction program
#PIX2ANG_RING,nside,index.pix,theta,phi
#ra = phi*radeg
#dec = 90-theta*radeg
#glactc,ra,dec,2000.0,glon,glat,1,/deg
#dt = predictcombtime(glon,glat,index.nexp)
## Do the sorting
#hsi = reverse(sort(dt))
#cmd = cmd[hsi]
#cmddir = cmddir[hsi]
#dt = dt[hsi]
#index = index[hsi]
# Divide into three using total times
#tot = total(dt>10)
#totcum = total(dt>10,/cum)
#print,min(where(totcum ge tot/3))
#print,min(where(totcum ge 2*tot/3))
#ncmd = len(cmd)
#nhalf = ncmd/2
## Randomize 1st half for hulk/thing/gp09
#cmd1 = cmd[0:(nhalf-1)]
#cmdadir1 = cmddir[0:(nhalf-1)]
#pix1 = index[0:(nhalf-1)].pix
#index1 = index[0:(nhalf-1)]
## now randomize
#rnd = sort(randomu(1,len(cmd1)))
#cmd1 = cmd1[rnd]
#cmddir1 = cmddir1[rnd]
#pix1 = pix1[rnd]
#index1 = index1[rnd]
# Slice it up
## hulk, 1st
##cmd = cmd[0:(nhalf-1):3]
##cmddir = cmddir[0:(nhalf-1):3]
##pix = index[0:(nhalf-1):3].pix
#cmd = cmd1[0:(nhalf/3)-1]
#cmddir = cmddir1[0:(nhalf/3)-1]
#pix = pix1[0:(nhalf/3)-1]
# thing, 2nd
##cmd = cmd[1:(nhalf-1):3]
##cmddir = cmddir[1:(nhalf-1):3]
##pix = index[1:(nhalf-1):3].pix
#cmd = cmd1[(nhalf/3):(2*nhalf/3)-1]
#cmddir = cmddir1[(nhalf/3):(2*nhalf/3)-1]
#pix = pix1[(nhalf/3):(2*nhalf/3)-1]
# gp09, 3rd
##cmd = cmd[2:(nhalf-1):3]
##cmddir = cmddir[2:(nhalf-1):3]
##pix = index[2:(nhalf-1):3].pix
#cmd = cmd1[(2*nhalf/3):*]
#cmddir = cmddir1[(2*nhalf/3):*]
#pix = pix1[(2*nhalf/3):*]
# gp05
#cmd = cmd[nhalf:*:4]
#cmddir = cmddir[nhalf:*:4]
#pix = index[nhalf:*:4].pix
# gp06
#cmd = cmd[nhalf+1:*:4]
#cmddir = cmddir[nhalf+1:*:4]
#pix = index[nhalf+1:*:4].pix
# gp07
#cmd = cmd[nhalf+2:*:4]
#cmddir = cmddir[nhalf+2:*:4]
#pix = index[nhalf+2:*:4].pix
# gp08
#cmd = cmd[nhalf+3:*:4]
#cmddir = cmddir[nhalf+3:*:4]
#pix = index[nhalf+3:*:4].pix
## Prioritize longest-running jobs FIRST
## Load the DECam run times
#sum1 = mrdfits(dir+'nsccmb_summary_hulk.fits',1)
#sum2 = mrdfits(dir+'nsccmb_summary_thing.fits',1)
#sum3 = mrdfits(dir+'nsccmb_summary_gp09.fits',1)
#sum = [sum1,sum2,sum3]
#si = sort(sum.mtime)
#sum = sum[si]
## only keep fairly recent ones
#gd, = np.where(sum.mtime gt 1.4897704e+09,ngd)
#sum = sum[gd]
## Deal with duplicates
#dbl = doubles(sum.pix,count=ndbl)
#alldbl = doubles(sum.pix,/all,count=nalldbl)
#torem = bytarr(nalldbl)
#for i=0,ndbl-1 do begin
# MATCH,sum[alldbl].pix,sum[dbl[i]].pix,ind1,ind2,/sort,count=nmatch
# torem[ind1[0:nmatch-2]] = 1
#endfor
#bd=where(torem eq 1,nbd)
#remove,alldbl[bd],sum
#dt = lonarr(len(index))-1
#MATCH,index.pix,sum.pix,ind1,ind2,/sort,count=nmatch
#dt[ind1] = sum[ind2].dt
## Do the sorting
#hsi = reverse(sort(dt))
#cmd = cmd[hsi]
#cmddir = cmddir[hsi]
#dt = dt[hsi]
#
## Divide into three using total times
#tot = total(dt>10)
#totcum = total(dt>10,/cum)
#print,min(where(totcum ge tot/3))
#print,min(where(totcum ge 2*tot/3))
## Start with healpix with low NEXP and far from MW midplane, LMC/SMC
#pix2ang_ring,nside,index.pix,theta,phi
#pixra = phi*radeg
#pixdec = 90-theta*radeg
#glactc,pixra,pixdec,2000.0,pixgl,pixgb,1,/deg
#cel2lmc,pixra,pixdec,palmc,radlmc
#cel2smc,pixra,pixdec,rasmc,radsmc
#gdpix, = np.where(index.nexp lt 50 and np.abs(pixgb) gt 10 and radlmc gt 5 and radsmc gt 5,ngdpix)
#
#outfile = dldir+'users/dnidever/nsc/instcal/combine/'+str(index.pix,2)+'.fits'
# Now run the combination program on each healpix pixel
PBS_DAEMON,cmd,cmddir,jobs=jobs,/hyperthread,/idle,prefix='nsccmb',nmulti=nmulti,wait=1
# RUN NSC_COMBINE_SUMMARY WHEN IT'S DONE!!!
## Load all the summary/metadata files
#print,'Creating Healpix summary file'
#sumstr = replicate({pix:0L,nexposures:0L,nobjects:0L,success:0},nupix)
#sumstr.pix = upix
#for i=0,nupix-1 do begin
# if (i+1) mod 5000 eq 0 then print,i+1
# file = dir+'combine/'+str(upix[i],2)+'.fits'
# if file_test(file) eq 1 then begin
# meta = MRDFITS(file,1,/silent)
# sumstr[i].nexposures = len(meta)
# hd = headfits(file,exten=2)
# sumstr[i].nobjects = sxpar(hd,'naxis2')
# sumstr[i].success = 1
# endif else begin
# sumstr[i].success = 0
# endelse
#endfor
#gd, = np.where(sumstr.success eq 1,ngd)
#print,str(ngd,2),' Healpix successfully processed'
#print,'Writing summary file to ',dir+'combine/nsc_instcal_combine.fits'
#MWRFITS,sumstr,dir+'combine/nsc_instcal_combine.fits',/create
# End logfile
#------------
#JOURNAL
| mit |
nwillemse/misc-scripts | ib-downloader/ib-downloader3.py | 1 | 8685 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ib-downloader3.py
Created on Tue Jul 5 15:53:45 2016
@author: nwillemse
"""
import click
import time
import pandas as pd
from sys import argv
from datetime import datetime
from ib.ext.Contract import Contract
from ib.opt import Connection
class Downloader:
def __init__(
self, tickers, exchange, ticker_type, expiry, barsize,
start_date, end_date, ib_client_id, ib_port
):
self.tickers = tickers
self.exchange = exchange
self.ticker_type = ticker_type
self.expiry = expiry
self.barsize = barsize
self.client_id = ib_client_id
self.order_id = 1
self.port = ib_port
self.currency = 'USD'
self.tws_conn = None
self.curr_ohlc = pd.DataFrame(
columns=['open', 'high', 'low', 'close', 'volume', 'open_interest']
)
self.no_data_error = False
self.got_hist_data = False
self.dates_list = self._get_trade_dates(start_date, end_date)
self.what_to_show = 'MIDPOINT' if ticker_type=='CASH' else 'TRADES'
self.end_date = end_date
def _get_trade_dates(self, start_dt=None, end_dt=None):
if self.ticker_type in ['CASH', 'FUT']:
dates = pd.date_range(start_dt, end_dt).tolist()
res = [x.strftime('%Y-%m-%d %H:%M:%S') for x in dates]
res.sort(reverse=True)
print(res)
else:
fn = 'nyse_dates.txt'
print("Loading trading days from %s..." % fn)
a = pd.read_csv(fn, parse_dates=['trade_date'])
sub = a[a.trade_date >= start_dt].trade_date
sub = sub[sub <= end_dt]
sub.sort_values(ascending=False, inplace=True)
res = sub.apply(lambda x: x.strftime('%Y-%m-%d')).values.tolist()
print("Loaded %s days from %s to %s" % (len(res), res[-1], res[0]))
#print(res)
return res
def error_handler(self, msg):
if msg.typeName == "error": # and msg.id != -1:
print("Server Error:", msg)
if msg.errorCode == 162:
self.no_data_error = True
def server_handler(self, msg):
if msg.typeName == "nextValidId":
self.order_id = msg.orderId
elif msg.typeName == "managedAccounts":
self.account_code = msg.accountsList
print(self.account_code)
elif msg.typeName == "historicalData":
self.historical_data_event(msg)
elif msg.typeName == "error" and msg.id != -1:
return
# else:
# print msg.typeName, msg
def create_contract(self, symbol, sec_type, exch, curr, expiry):
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = sec_type
contract.m_exchange = exch
contract.m_currency = curr
contract.m_expiry = expiry
if sec_type=='FUT':
contract.m_includeExpired = 1
print("symbol:%s secType:%s exchange:%s currency:%s expiry:%s" % (
contract.m_symbol, contract.m_secType, contract.m_exchange,
contract.m_currency, contract.m_expiry
)
)
return contract
def historical_data_event(self, msg):
if msg.date.find('finished') == -1:
try:
date = datetime.strptime(msg.date, '%Y%m%d %H:%M:%S')
except Exception:
date = datetime.strptime(msg.date, '%Y%m%d')
self.curr_ohlc.loc[date] = msg.open, msg.high, msg.low, msg.close, \
msg.volume, msg.count
else:
self.got_hist_data = True
def connect_to_tws(self):
self.tws_conn = Connection.create(host='localhost',
port=self.port,
clientId=self.client_id)
self.tws_conn.connect()
time.sleep(2)
if not self.tws_conn.isConnected():
raise Exception("Unable to connect to TWS. Make sure the Gateway or TWS has been started. Port=%s ClientId=%s" % (self.port, self.client_id))
def disconnect_from_tws(self):
if self.tws_conn is not None:
self.tws_conn.disconnect()
def register_callback_functions(self):
print("Registering callback functions...")
# Assign server messages handling function.
self.tws_conn.registerAll(self.server_handler)
# Assign error handling function.
self.tws_conn.register(self.error_handler, 'Error')
def request_historical_data(self, symbol_id, symbol):
contract = self.create_contract(symbol,
self.ticker_type,
self.exchange,
self.currency,
self.expiry)
self.got_hist_data = False
self.no_data_error = False
end_dt = self.end_date.strftime('%Y%m%d %H:%M:%S')
print("Requesting history for %s on %s..." % (symbol, self.end_date))
self.tws_conn.reqHistoricalData(symbol_id,
contract,
endDateTime=end_dt,
durationStr='250 D',
barSizeSetting=self.barsize,
whatToShow=self.what_to_show,
useRTH=0,
formatDate=1)
while not self.got_hist_data and not self.no_data_error:
time.sleep(1)
if self.no_data_error:
self.no_data_error = False
print("no data found for this day, continuing...")
return
time.sleep(8)
def start(self):
try:
print("Connecing to tws...")
self.connect_to_tws()
self.register_callback_functions()
for ticker in self.tickers:
print("Request historical data for %s" % ticker)
self.request_historical_data(1, ticker)
self.curr_ohlc.sort_index(ascending=False, inplace=True)
self.curr_ohlc.index.name = 'datetime'
if self.ticker_type=='CASH':
filename = ticker + '.' + self.currency + '.csv'
else:
filename = ticker + '.csv'
self.curr_ohlc.to_csv('data/' + filename)
except Exception as e:
print("Error:", e)
finally:
print("disconnected")
self.disconnect_from_tws()
@click.command()
@click.option('--tickers', '-t', default='SPY',
help='Comma separated list of tickers. Default="SPY"')
@click.option('--exchange', '-x', default='GLOBEX',
help='Comma separated list of tickers. Default="SPY"')
@click.option('--tickertype', '-tt', default='STK',
help='Type of tickers (STK, FUT or CASH). Defaul="STK"')
@click.option('--expiry', '-e',
help='The expiry when FUT ticker type. Default=None')
@click.option('--barsize', '-bs', default='15 mins',
help='Barsize of downloaded data. Default="15 mins"')
@click.option('--startdate', '-sd', default='2015-04-20',
help='Starting date for data download (YYYY-MM-DD).')
@click.option('--enddate', '-ed', default='2015-05-04',
help='Ending date for data download (YYYY-MM-DD).')
@click.option('--ib_client_id', '-c', default=200,
help='IB Client Id.')
@click.option('--ib_port', '-p', default=4001,
help='IB API Port.')
def main(tickers, exchange, tickertype, expiry, barsize, startdate,
enddate, ib_client_id, ib_port
):
"""
IB Downloader downloads data from Interactive Brokers for the specified
list of tickers.
"""
start_dt = datetime.strptime(startdate + ' 16:00:00', '%Y-%m-%d %H:%M:%S')
end_dt = datetime.strptime(enddate + ' 16:00:00', '%Y-%m-%d %H:%M:%S')
# click passes str objects under Python 3, so no ascii re-encoding is needed
tickers = tickers.split(',')
ticker_type = tickertype
print('Tickers: %s' % tickers)
system = Downloader(
tickers, exchange, ticker_type, expiry, barsize,
start_dt, end_dt, ib_client_id, ib_port
)
system.start()
if __name__ == '__main__':
main()
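# Editor's note (sketch): an example invocation with made-up arguments; it needs
# a running IB Gateway/TWS session listening on the given API port.
#
#   python ib-downloader3.py -t ES -x GLOBEX -tt FUT -e 201609 \
#       -bs "1 hour" -sd 2016-01-04 -ed 2016-03-31 -c 200 -p 4001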
| mit |
simon-pepin/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
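# Editor's usage sketch (not part of the module): the Laplacian of a three-node
# path graph computed from its dense adjacency matrix.
#
#   >>> import numpy as np
#   >>> from sklearn.utils.graph import graph_laplacian
#   >>> adjacency = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
#   >>> graph_laplacian(adjacency)
#   array([[ 1, -1,  0],
#          [-1,  2, -1],
#          [ 0, -1,  1]])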
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
akionakamura/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
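# Editor's sketch (addition): print the geometric margin width 2/||w|| for each
# penalty, which makes the effect of C described in the module docstring concrete.
for name, penalty in (('unreg', 1), ('reg', 0.05)):
    clf = svm.SVC(kernel='linear', C=penalty).fit(X, Y)
    print(name, 'margin width:', 2 / np.sqrt(np.sum(clf.coef_ ** 2)))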
| bsd-3-clause |
bloyl/mne-python | mne/decoding/mixin.py | 14 | 2851 |
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : array, shape (n_samples, n_features)
Training set.
y : array, shape (n_samples,)
Target values.
**fit_params : dict
Additional fitting parameters passed to ``self.fit``.
Returns
-------
X_new : array, shape (n_samples, n_features_new)
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
class EstimatorMixin(object):
"""Mixin class for estimators."""
def get_params(self, deep=True):
"""Get the estimator params.
Parameters
----------
deep : bool
Deep.
"""
return
def set_params(self, **params):
"""Set parameters (mimics sklearn API).
Parameters
----------
**params : dict
Extra parameters.
Returns
-------
inst : object
The instance.
"""
if not params:
return self
valid_params = self.get_params(deep=True)
for key, value in params.items():
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
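# Editor's usage sketch (hypothetical estimator, not part of mne): a minimal
# subclass overriding get_params so the set_params machinery above can be
# exercised.
def _estimator_mixin_example():
    class _Toy(EstimatorMixin):
        def __init__(self, alpha=1.0):
            self.alpha = alpha
        def get_params(self, deep=True):
            return {'alpha': self.alpha}
    est = _Toy()
    est.set_params(alpha=0.5)   # plain (non-nested) parameter path
    return est.alpha            # -> 0.5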
| bsd-3-clause |
catalyst-cooperative/pudl | src/pudl/output/pudltabl.py | 1 | 35324 | """
This module provides a class enabling tabular compilations from the PUDL DB.
Many of our potential users are comfortable using spreadsheets, not databases,
so we are creating a collection of tabular outputs that contain the most
useful core information from the PUDL data packages, including additional keys
and human readable names for the objects (utilities, plants, generators) being
described in the table.
These tabular outputs can be joined with each other using those keys, and used
as a data source within Microsoft Excel, Access, R Studio, or other data
analysis packages that folks may be familiar with. They aren't meant to
completely replicate all the data and relationships contained within the full
PUDL database, but should serve as a generally usable set of PUDL data
products.
The PudlTabl class can also provide access to complex derived values, like the
generator and plant level marginal cost of electricity (MCOE), which are
defined in the analysis module.
In the long run, this is a probably a kind of prototype for pre-packaged API
outputs or data products that we might want to be able to provide to users a la
carte.
Todo:
Return to for update arg and returns values in functions below
"""
import logging
from pathlib import Path
# Useful high-level external modules.
import pandas as pd
import sqlalchemy as sa
import pudl
from pudl import constants as pc
logger = logging.getLogger(__name__)
###############################################################################
# Output Class, that can pull all the below tables with similar parameters
###############################################################################
class PudlTabl(object):
"""A class for compiling common useful tabular outputs from the PUDL DB."""
def __init__(
self,
pudl_engine,
ds=None,
freq=None,
start_date=None,
end_date=None,
fill_fuel_cost=False,
roll_fuel_cost=False,
fill_net_gen=False
):
"""
Initialize the PUDL output object.
Private data members are not initialized until they are requested.
They are then cached within the object unless they get re-initialized
via a method that includes update=True.
Some methods (e.g mcoe) will take a while to run, since they need to
pull substantial data and do a bunch of calculations.
Args:
freq (str): String describing time frequency at which to aggregate
the reported data. E.g. 'MS' (monthly start).
start_date (date): Beginning date for data to pull from the
PUDL DB.
end_date (date): End date for data to pull from the PUDL DB.
pudl_engine (sqlalchemy.engine.Engine): SQLAlchemy connection engine
for the PUDL DB.
fill_fuel_cost (boolean): if True, fill in missing EIA fuel cost
from ``frc_eia923()`` with state-level monthly averages from EIA's
API.
roll_fuel_cost (boolean): if True, apply a rolling average
to a subset of output table's columns (currently only
'fuel_cost_per_mmbtu' for the frc table).
fill_net_gen (boolean): if True, use net generation from the
generation_fuel_eia923 - which is reported at the
plant/fuel/prime mover level - re-allocated to generators in
``mcoe()``, ``capacity_factor()`` and ``heat_rate_by_unit()``.
"""
self.pudl_engine = pudl_engine
self.freq = freq
# We need datastore access because some data is not yet integrated into the
# PUDL DB. See the etl_eia861 method.
self.ds = ds
if self.ds is None:
pudl_in = Path(pudl.workspace.setup.get_defaults()["pudl_in"])
self.ds = pudl.workspace.datastore.Datastore(
local_cache_path=pudl_in / "data"
)
# grab all working eia dates to use to set start and end dates if they
# are not set
eia_dates = pudl.helpers.get_working_eia_dates()
if start_date is None:
self.start_date = min(eia_dates)
else:
# Make sure it's a date... and not a string.
self.start_date = pd.to_datetime(start_date)
if end_date is None:
self.end_date = max(eia_dates)
else:
# Make sure it's a date... and not a string.
self.end_date = pd.to_datetime(end_date)
if not pudl_engine:
raise AssertionError('PudlTabl object needs a pudl_engine')
self.roll_fuel_cost = roll_fuel_cost
self.fill_fuel_cost = fill_fuel_cost
self.fill_net_gen = fill_net_gen
# We populate this library of dataframes as they are generated, and
# allow them to persist, in case they need to be used again.
self._dfs = {
"pu_eia": None,
"pu_ferc1": None,
"utils_eia860": None,
"bga_eia860": None,
"plants_eia860": None,
"gens_eia860": None,
"own_eia860": None,
# TODO add the other tables -- this is just an interim check
"advanced_metering_infrastructure_eia861": None,
"balancing_authority_eia861": None,
"balancing_authority_assn_eia861": None,
"demand_response_eia861": None,
"demand_side_management_eia861": None,
"distributed_generation_eia861": None,
"distribution_systems_eia861": None,
"dynamic_pricing_eia861": None,
"energy_efficiency_eia861": None,
"green_pricing_eia861": None,
"mergers_eia861": None,
"net_metering_eia861": None,
"non_net_metering_eia861": None,
"operational_data_eia861": None,
"reliability_eia861": None,
"sales_eia861": None,
"service_territory_eia861": None,
"utility_assn_eia861": None,
"utility_data_eia861": None,
# TODO add the other tables -- this is just an interim check
"respondent_id_ferc714": None,
"gf_eia923": None,
"frc_eia923": None,
"bf_eia923": None,
"gen_eia923": None,
"gen_og_eia923": None,
"gen_allocated_eia923": None,
"plants_steam_ferc1": None,
"fuel_ferc1": None,
"fbp_ferc1": None,
"plants_small_ferc1": None,
"plants_hydro_ferc1": None,
"plants_pumped_storage_ferc1": None,
"purchased_power_ferc1": None,
"plant_in_service_ferc1": None,
"bga": None,
"hr_by_unit": None,
"hr_by_gen": None,
"fuel_cost": None,
"capacity_factor": None,
"mcoe": None,
}
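    # A minimal construction sketch for the parameters documented above, kept
    # as comments because we are inside the class body. The SQLite URL, dates,
    # and frequency below are illustrative assumptions:
    #
    #     import sqlalchemy as sa
    #     engine = sa.create_engine("sqlite:///pudl.sqlite")  # hypothetical DB path
    #     pudl_out = PudlTabl(engine, freq="MS",
    #                         start_date="2017-01-01", end_date="2018-12-31")
    #     frc = pudl_out.frc_eia923()  # computed on first call, then cached in _dfs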
def pu_eia860(self, update=False):
"""
Pull a dataframe of EIA plant-utility associations.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['pu_eia'] is None:
self._dfs['pu_eia'] = pudl.output.eia860.plants_utils_eia860(
self.pudl_engine,
start_date=self.start_date,
end_date=self.end_date)
return self._dfs['pu_eia']
def pu_ferc1(self, update=False):
"""
Pull a dataframe of FERC plant-utility associations.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['pu_ferc1'] is None:
self._dfs['pu_ferc1'] = pudl.output.ferc1.plants_utils_ferc1(
self.pudl_engine)
return self._dfs['pu_ferc1']
###########################################################################
# EIA 861 Interim Outputs (awaiting full DB integration)
###########################################################################
def etl_eia861(self, update=False):
"""
A single function that runs the temporary EIA 861 ETL and sets all DFs.
        This is an interim solution that provides a (somewhat) standard way of
        accessing the EIA 861 data prior to its being fully integrated into the
        PUDL database. Accessing any one of these dataframes causes all of them
        to be generated and cached, since a single ETL run produces them
        together. Only the tables that have actual transform functions are
        included; as new transform functions are completed, they need to be
        added to the list below.
Args:
update (bool): Whether to overwrite the existing dataframes if they exist.
"""
if update or self._dfs["balancing_authority_eia861"] is None:
logger.warning(
"Running the interim EIA 861 ETL process!")
eia861_raw_dfs = (
pudl.extract.eia861.Extractor(self.ds)
.extract(year=pc.working_partitions["eia861"]["years"])
)
eia861_tfr_dfs = pudl.transform.eia861.transform(eia861_raw_dfs)
for table in eia861_tfr_dfs:
self._dfs[table] = eia861_tfr_dfs[table]
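    # Access-pattern sketch for the interim EIA 861 tables (comments only;
    # ``pudl_out`` stands for a hypothetical PudlTabl instance): the first
    # accessor call runs the full interim ETL, later calls are served from the
    # ``_dfs`` cache.
    #
    #     sales = pudl_out.sales_eia861()         # triggers etl_eia861(), fills all EIA 861 DFs
    #     dr = pudl_out.demand_response_eia861()  # no re-run, returns the cached dataframe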
def advanced_metering_infrastructure_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["advanced_metering_infrastructure_eia861"]
def balancing_authority_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["balancing_authority_eia861"]
def balancing_authority_assn_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["balancing_authority_assn_eia861"]
def demand_response_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["demand_response_eia861"]
def demand_side_management_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["demand_side_management_eia861"]
def distributed_generation_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["distributed_generation_eia861"]
def distribution_systems_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["distribution_systems_eia861"]
def dynamic_pricing_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["dynamic_pricing_eia861"]
def energy_efficiency_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["energy_efficiency_eia861"]
def green_pricing_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["green_pricing_eia861"]
def mergers_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["mergers_eia861"]
def net_metering_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["net_metering_eia861"]
def non_net_metering_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["non_net_meterin_eia861"]
def operational_data_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["operational_data_eia861"]
def reliability_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["reliability_eia861"]
def sales_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["sales_eia861"]
def service_territory_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["service_territory_eia861"]
def utility_assn_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["utility_assn_eia861"]
def utility_data_eia861(self, update=False):
"""An interim EIA 861 output function."""
self.etl_eia861(update=update)
return self._dfs["_eia861"]
###########################################################################
# FERC 714 Interim Outputs (awaiting full DB integration)
###########################################################################
def etl_ferc714(self, update=False):
"""
A single function that runs the temporary FERC 714 ETL and sets all DFs.
This is an interim solution, so that we can have a (relatively) standard way of
accessing the FERC 714 data prior to getting it integrated into the PUDL DB.
Some of these are not yet cleaned up, but there are dummy transform functions
which pass through the raw DFs with some minor alterations, so all the data is
available as it exists right now.
An attempt to access *any* of the dataframes results in all of them being
populated, since generating all of them is almost the same amount of work as
generating one of them.
Args:
update (bool): Whether to overwrite the existing dataframes if they exist.
"""
if update or self._dfs["respondent_id_ferc714"] is None:
logger.warning(
"Running the interim FERC 714 ETL process!")
ferc714_raw_dfs = pudl.extract.ferc714.extract(ds=self.ds)
ferc714_tfr_dfs = pudl.transform.ferc714.transform(ferc714_raw_dfs)
for table in ferc714_tfr_dfs:
self._dfs[table] = ferc714_tfr_dfs[table]
def respondent_id_ferc714(self, update=False):
"""An interim FERC 714 output function."""
self.etl_ferc714(update=update)
return self._dfs["respondent_id_ferc714"]
def demand_hourly_pa_ferc714(self, update=False):
"""An interim FERC 714 output function."""
self.etl_ferc714(update=update)
return self._dfs["demand_hourly_pa_ferc714"]
def description_pa_ferc714(self, update=False):
"""An interim FERC 714 output function."""
self.etl_ferc714(update=update)
return self._dfs["description_pa_ferc714"]
def id_certification_ferc714(self, update=False):
"""An interim FERC 714 output function."""
self.etl_ferc714(update=update)
return self._dfs["id_certification_ferc714"]
def gen_plants_ba_ferc714(self, update=False):
"""An interim FERC 714 output function."""
self.etl_ferc714(update=update)
return self._dfs["gen_plants_ba_ferc714"]
def demand_monthly_ba_ferc714(self, update=False):
"""An interim FERC 714 output function."""
self.etl_ferc714(update=update)
return self._dfs["demand_monthly_ba_ferc714"]
def net_energy_load_ba_ferc714(self, update=False):
"""An interim FERC 714 output function."""
self.etl_ferc714(update=update)
return self._dfs["net_energy_load_ba_ferc714"]
def adjacency_ba_ferc714(self, update=False):
"""An interim FERC 714 output function."""
self.etl_ferc714(update=update)
return self._dfs["adjacency_ba_ferc714"]
def interchange_ba_ferc714(self, update=False):
"""An interim FERC 714 output function."""
self.etl_ferc714(update=update)
return self._dfs["interchange_ba_ferc714"]
def lambda_hourly_ba_ferc714(self, update=False):
"""An interim FERC 714 output function."""
self.etl_ferc714(update=update)
return self._dfs["lambda_hourly_ba_ferc714"]
def lambda_description_ferc714(self, update=False):
"""An interim FERC 714 output function."""
self.etl_ferc714(update=update)
return self._dfs["lambda_description_ferc714"]
def demand_forecast_pa_ferc714(self, update=False):
"""An interim FERC 714 output function."""
self.etl_ferc714(update=update)
return self._dfs["demand_forecast_pa_ferc714"]
###########################################################################
# EIA 860/923 OUTPUTS
###########################################################################
def utils_eia860(self, update=False):
"""
Pull a dataframe describing utilities reported in EIA 860.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['utils_eia860'] is None:
self._dfs['utils_eia860'] = pudl.output.eia860.utilities_eia860(
self.pudl_engine,
start_date=self.start_date,
end_date=self.end_date)
return self._dfs['utils_eia860']
def bga_eia860(self, update=False):
"""
Pull a dataframe of boiler-generator associations from EIA 860.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['bga_eia860'] is None:
self._dfs['bga_eia860'] = pudl.output.eia860.boiler_generator_assn_eia860(
self.pudl_engine,
start_date=self.start_date,
end_date=self.end_date)
return self._dfs['bga_eia860']
def plants_eia860(self, update=False):
"""
Pull a dataframe of plant level info reported in EIA 860.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['plants_eia860'] is None:
self._dfs['plants_eia860'] = pudl.output.eia860.plants_eia860(
self.pudl_engine,
start_date=self.start_date,
end_date=self.end_date,)
return self._dfs['plants_eia860']
def gens_eia860(self, update=False):
"""
Pull a dataframe describing generators, as reported in EIA 860.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['gens_eia860'] is None:
self._dfs['gens_eia860'] = pudl.output.eia860.generators_eia860(
self.pudl_engine,
start_date=self.start_date,
end_date=self.end_date)
return self._dfs['gens_eia860']
def own_eia860(self, update=False):
"""
Pull a dataframe of generator level ownership data from EIA 860.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['own_eia860'] is None:
self._dfs['own_eia860'] = pudl.output.eia860.ownership_eia860(
self.pudl_engine,
start_date=self.start_date,
end_date=self.end_date)
return self._dfs['own_eia860']
def gf_eia923(self, update=False):
"""
Pull EIA 923 generation and fuel consumption data.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['gf_eia923'] is None:
self._dfs['gf_eia923'] = pudl.output.eia923.generation_fuel_eia923(
self.pudl_engine,
freq=self.freq,
start_date=self.start_date,
end_date=self.end_date)
return self._dfs['gf_eia923']
def frc_eia923(self, update=False):
"""
Pull EIA 923 fuel receipts and costs data.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['frc_eia923'] is None:
self._dfs['frc_eia923'] = pudl.output.eia923.fuel_receipts_costs_eia923(
self.pudl_engine,
freq=self.freq,
start_date=self.start_date,
end_date=self.end_date,
fill=self.fill_fuel_cost,
roll=self.roll_fuel_cost)
return self._dfs['frc_eia923']
def bf_eia923(self, update=False):
"""
Pull EIA 923 boiler fuel consumption data.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['bf_eia923'] is None:
self._dfs['bf_eia923'] = pudl.output.eia923.boiler_fuel_eia923(
self.pudl_engine,
freq=self.freq,
start_date=self.start_date,
end_date=self.end_date)
return self._dfs['bf_eia923']
def gen_eia923(self, update=False):
"""
Pull EIA 923 net generation data by generator.
        Net generation is reported in two separate tables in EIA 923: in the
generation_eia923 and generation_fuel_eia923 tables. While the
generation_fuel_eia923 table is more complete (the generation_eia923
table includes only ~55% of the reported MWhs), the generation_eia923
table is more granular (it is reported at the generator level).
This method either grabs the generation_eia923 table that is reported
by generator, or allocates net generation from the
generation_fuel_eia923 table to the generator level.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['gen_eia923'] is None:
if self.fill_net_gen:
logger.info(
'Allocating net generation from the generation_fuel_eia923 '
'to the generator level instead of using the less complete '
'generation_eia923 table.'
)
self._dfs['gen_eia923'] = self.gen_allocated_eia923(update)
else:
self._dfs['gen_eia923'] = self.gen_original_eia923(update)
return self._dfs['gen_eia923']
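    # Hedged sketch of the ``fill_net_gen`` switch described above
    # (``engine`` is a hypothetical SQLAlchemy engine):
    #
    #     gen_reported = PudlTabl(engine).gen_eia923()                    # generation_eia923 as reported
    #     gen_filled = PudlTabl(engine, fill_net_gen=True).gen_eia923()   # gen fuel allocated to generators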
def gen_original_eia923(self, update=False):
"""Pull the original EIA 923 net generation data by generator."""
if update or self._dfs['gen_og_eia923'] is None:
self._dfs['gen_og_eia923'] = pudl.output.eia923.generation_eia923(
self.pudl_engine,
freq=self.freq,
start_date=self.start_date,
end_date=self.end_date)
return self._dfs['gen_og_eia923']
def gen_allocated_eia923(self, update=False):
"""Net generation from gen fuel table allocated to generators."""
if update or self._dfs['gen_allocated_eia923'] is None:
self._dfs['gen_allocated_eia923'] = (
pudl.analysis.allocate_net_gen.allocate_gen_fuel_by_gen(self)
)
return self._dfs['gen_allocated_eia923']
###########################################################################
# FERC FORM 1 OUTPUTS
###########################################################################
def plants_steam_ferc1(self, update=False):
"""
Pull the FERC Form 1 steam plants data.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['plants_steam_ferc1'] is None:
self._dfs['plants_steam_ferc1'] = pudl.output.ferc1.plants_steam_ferc1(
self.pudl_engine)
return self._dfs['plants_steam_ferc1']
def fuel_ferc1(self, update=False):
"""
Pull the FERC Form 1 steam plants fuel consumption data.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['fuel_ferc1'] is None:
self._dfs['fuel_ferc1'] = pudl.output.ferc1.fuel_ferc1(
self.pudl_engine)
return self._dfs['fuel_ferc1']
def fbp_ferc1(self, update=False):
"""
Summarize FERC Form 1 fuel usage by plant.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['fbp_ferc1'] is None:
self._dfs['fbp_ferc1'] = pudl.output.ferc1.fuel_by_plant_ferc1(
self.pudl_engine)
return self._dfs['fbp_ferc1']
def plants_small_ferc1(self, update=False):
"""
Pull the FERC Form 1 Small Plants Table.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['plants_small_ferc1'] is None:
self._dfs['plants_small_ferc1'] = pudl.output.ferc1.plants_small_ferc1(
self.pudl_engine)
return self._dfs['plants_small_ferc1']
def plants_hydro_ferc1(self, update=False):
"""
Pull the FERC Form 1 Hydro Plants Table.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['plants_hydro_ferc1'] is None:
self._dfs['plants_hydro_ferc1'] = pudl.output.ferc1.plants_hydro_ferc1(
self.pudl_engine)
return self._dfs['plants_hydro_ferc1']
def plants_pumped_storage_ferc1(self, update=False):
"""
Pull the FERC Form 1 Pumped Storage Table.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['plants_pumped_storage_ferc1'] is None:
self._dfs['plants_pumped_storage_ferc1'] = pudl.output.ferc1.plants_pumped_storage_ferc1(
self.pudl_engine)
return self._dfs['plants_pumped_storage_ferc1']
def purchased_power_ferc1(self, update=False):
"""
Pull the FERC Form 1 Purchased Power Table.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['purchased_power_ferc1'] is None:
self._dfs['purchased_power_ferc1'] = pudl.output.ferc1.purchased_power_ferc1(
self.pudl_engine)
return self._dfs['purchased_power_ferc1']
def plant_in_service_ferc1(self, update=False):
"""
Pull the FERC Form 1 Plant in Service Table.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['plant_in_service_ferc1'] is None:
self._dfs['plant_in_service_ferc1'] = pudl.output.ferc1.plant_in_service_ferc1(
self.pudl_engine)
return self._dfs['plant_in_service_ferc1']
###########################################################################
# EIA MCOE OUTPUTS
###########################################################################
def bga(self, update=False):
"""
Pull the more complete EIA/PUDL boiler-generator associations.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['bga'] is None:
self._dfs['bga'] = pudl.output.glue.boiler_generator_assn(
self.pudl_engine,
start_date=self.start_date,
end_date=self.end_date)
return self._dfs['bga']
def hr_by_gen(self, update=False):
"""
Calculate and return generator level heat rates (mmBTU/MWh).
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['hr_by_gen'] is None:
self._dfs['hr_by_gen'] = pudl.analysis.mcoe.heat_rate_by_gen(self)
return self._dfs['hr_by_gen']
def hr_by_unit(self, update=False):
"""
Calculate and return generation unit level heat rates.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['hr_by_unit'] is None:
self._dfs['hr_by_unit'] = (
pudl.analysis.mcoe.heat_rate_by_unit(self)
)
return self._dfs['hr_by_unit']
def fuel_cost(self, update=False):
"""
Calculate and return generator level fuel costs per MWh.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['fuel_cost'] is None:
self._dfs['fuel_cost'] = pudl.analysis.mcoe.fuel_cost(self)
return self._dfs['fuel_cost']
def capacity_factor(self, update=False,
min_cap_fact=None, max_cap_fact=None):
"""
Calculate and return generator level capacity factors.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
Returns:
pandas.DataFrame: a denormalized table for interactive use.
"""
if update or self._dfs['capacity_factor'] is None:
self._dfs['capacity_factor'] = (
pudl.analysis.mcoe.capacity_factor(
self, min_cap_fact=min_cap_fact, max_cap_fact=max_cap_fact)
)
return self._dfs['capacity_factor']
def mcoe(self, update=False,
min_heat_rate=5.5, min_fuel_cost_per_mwh=0.0,
min_cap_fact=0.0, max_cap_fact=1.5):
"""
Calculate and return generator level MCOE based on EIA data.
Eventually this calculation will include non-fuel operating expenses
as reported in FERC Form 1, but for now only the fuel costs reported
        to EIA are included. They are attributed based on the unit-level heat
rates and fuel costs.
Args:
update (bool): If true, re-calculate the output dataframe, even if
a cached version exists.
min_heat_rate: lowest plausible heat rate, in mmBTU/MWh. Any MCOE
records with lower heat rates are presumed to be invalid, and
are discarded before returning.
            min_cap_fact: minimum generator capacity factor. Generator records
                with a lower capacity factor will be filtered out before
                returning. This allows the user to exclude generators that
                aren't being used enough to produce valid estimates.
min_fuel_cost_per_mwh: minimum fuel cost on a per MWh basis that is
required for a generator record to be considered valid. For
some reason there are now a large number of $0 fuel cost
records, which previously would have been NaN.
            max_cap_fact: maximum generator capacity factor. Generator records
                with a higher capacity factor will be filtered out before
                returning. This allows the user to exclude generators with
                implausibly high utilization, which likely indicates bad data.
Returns:
:class:`pandas.DataFrame`: a compilation of generator attributes,
including fuel costs per MWh.
"""
if update or self._dfs['mcoe'] is None:
self._dfs['mcoe'] = pudl.analysis.mcoe.mcoe(
self,
min_heat_rate=min_heat_rate,
min_fuel_cost_per_mwh=min_fuel_cost_per_mwh,
min_cap_fact=min_cap_fact,
max_cap_fact=max_cap_fact,
)
return self._dfs['mcoe']
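    # Hedged usage sketch for the filters documented above; the threshold
    # values are illustrative assumptions, not recommendations:
    #
    #     mcoe_df = pudl_out.mcoe(min_heat_rate=6.0, min_cap_fact=0.05,
    #                             max_cap_fact=1.2)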
def get_table_meta(pudl_engine):
"""Grab the pudl sqlitie database table metadata."""
md = sa.MetaData()
md.reflect(pudl_engine)
return md.tables
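# Hedged usage sketch for ``get_table_meta`` (the database URL is an
# assumption):
#
#     engine = sa.create_engine("sqlite:///pudl.sqlite")
#     tables = get_table_meta(engine)
#     sorted(tables.keys())  # names of the reflected PUDL tables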
| mit |
heli522/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
moutai/scikit-learn | examples/neural_networks/plot_mnist_filters.py | 57 | 2195 | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix has the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier
mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# algorithm='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
algorithm='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
gautamkmr/incubator-mxnet | example/kaggle-ndsb1/training_curves.py | 52 | 1879 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
## based on https://github.com/dmlc/mxnet/issues/1302
## Parses the model fit log file and generates a train/val vs epoch plot
import matplotlib.pyplot as plt
import numpy as np
import re
import argparse
parser = argparse.ArgumentParser(description='Parses log file and generates train/val curves')
parser.add_argument('--log-file', type=str,default="log_tr_va",
help='the path of log file')
args = parser.parse_args()
TR_RE = re.compile('.*?]\sTrain-accuracy=([\d\.]+)')
VA_RE = re.compile('.*?]\sValidation-accuracy=([\d\.]+)')
log = open(args.log_file).read()
log_tr = [float(x) for x in TR_RE.findall(log)]
log_va = [float(x) for x in VA_RE.findall(log)]
idx = np.arange(len(log_tr))
plt.figure(figsize=(8, 6))
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.plot(idx, log_tr, 'o', linestyle='-', color="r",
label="Train accuracy")
plt.plot(idx, log_va, 'o', linestyle='-', color="b",
label="Validation accuracy")
plt.legend(loc="best")
plt.xticks(np.arange(min(idx), max(idx)+1, 5))
plt.yticks(np.arange(0, 1, 0.2))
plt.ylim([0,1])
plt.show()
| apache-2.0 |
jakobworldpeace/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 82 | 4768 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_splits=3):
cv = KFold(n_splits=n_splits)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv.split(X_train, y_train):
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_splits
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
palmishr/3D_Curve_Reconstruct | helper.py | 1 | 19473 |
# coding: utf-8
# In[10]:
'''
# Planar curves generation - Cube Method
Divide a unit cube into 4 adjacent sub-cubes and pick a point randomly
from each sub-unit within a pre-specified planar volume.
The area within each sub-unit can be chosen based on
the desired expanse of the curve.
Join the chosen points by spline interpolation.
'''
from __future__ import division, print_function, absolute_import
import sys
import time
import math
import scipy as sp
from scipy.interpolate import splprep, splev
import random
import json
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from itertools import product, combinations
import random as random
import sklearn.datasets, sklearn.decomposition
import numpy.linalg as linalg
get_ipython().magic('matplotlib notebook')
# Helper functions for Spline Processing
def get_3d_points(X,Y,Z):
pts = np.concatenate((X,Y,Z), axis=0)
pts = pts.reshape(3,len(X))
return pts
def add_curve_to_array(x, y, z):
inputs = np.concatenate((x,y,z), axis=0)
len(inputs)
inputs = inputs.reshape(3,300)
return inputs
# Spline Generation
def spline_generate(pts):
#pts = np.unique(pts)
tck, u = splprep(pts, u=None, s=0.0)
u_new = np.linspace(u.min(), u.max(), 300)
x_new, y_new, z_new = splev(u_new, tck, der=0)
return x_new, y_new, z_new
def get_rot_angle(theta=0):
if(theta == 0): theta = np.random.uniform(0,1)*2*np.pi
cos_t = np.cos(theta)
sin_t = np.sin(theta)
return cos_t, sin_t
def random_rotate(x,y,z,a=0,b=0,g=0):
cos_t, sin_t = get_rot_angle(a)
r_x = np.matrix([[1, 0, 0], [0, cos_t, -sin_t], [0, sin_t, cos_t]])
cos_t, sin_t = get_rot_angle(b)
r_y = np.matrix([[cos_t, 0, sin_t], [0, 1, 0], [-sin_t,0, cos_t]])
cos_t, sin_t = get_rot_angle(g)
r_z = np.matrix([[cos_t, -sin_t, 0], [sin_t, cos_t, 0], [0, 0, 1]])
r = np.dot((np.dot(r_x, r_y)), r_z)
rot_v = np.dot(r,np.matrix([[x],[y],[z]]))
return rot_v.item(0),rot_v.item(1),rot_v.item(2)
def draw_cube(ax, b):
    # draw the edges of the cube whose corner points are listed in b on the given axes
    for s, e in combinations(np.array(b), 2):
        if np.sum(np.abs(s-e)) == 1:
            ax.plot3D(*zip(s, e), color="r")
def create_show_p_curve():
boxes = [
[(0.5, 0.9), (0.9, 0.9), (0.5, 0.9)], #[(0.1, 0.5), (0.9, 0.9), (0.1, 0.5)],
[(-0.9, -0.5), (0.9, 0.9), (0.5, 0.9)], #[(-0.5, -0.1), (0.9, 0.9), (0.1, 0.5)],
[(0.5, 0.9), (0.9, 0.9), (-0.9, -0.5)], #[(0.1, 0.5), (0.9, 0.9), (-0.5, -0.1)],
[(-0.9, -0.5), (0.9, 0.9), (-0.9, -0.5)] #[(-0.5, -0.1), (0.9, 0.9), (-0.5, -0.1)],
]
X_raw=[]
Y_raw=[]
Z_raw=[]
N=1
data = {}
data['planar_curves'] = []
startTime = time.time()
for i in range(N):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
pts=[]
X_raw=[]
Y_raw=[]
Z_raw=[]
#for all points in this curve
x_theta = np.random.uniform(0,1)*2*np.pi
y_theta = np.random.uniform(0,1)*2*np.pi
z_theta = np.random.uniform(0,1)*2*np.pi
for b in boxes:
x = random.uniform(b[0][0]/1, b[0][1]/1)
y = random.uniform(b[1][0]/1, b[1][1]/1)
z = random.uniform(b[2][0]/1, b[2][1]/1)
x,y,z = random_rotate(x,y,z, x_theta, y_theta, z_theta)
X_raw.append(x)
Y_raw.append(y)
Z_raw.append(z)
# draw cube
r = [-1, 1]
for s, e in combinations(np.array(list(product(r, r, r))), 2):
if np.sum(np.abs(s-e)) == r[1]-r[0]:
ax.plot3D(*zip(s, e), color="b")
pts = get_3d_points(X_raw,Y_raw,Z_raw)
ax.plot(X_raw, Y_raw, Z_raw, 'ro')
X, Y, Z = spline_generate(pts)
curve = add_curve_to_array(X, Y, Z)
ax.plot(X, Y, Z, 'b--')
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
plt.show()
data['planar_curves'].append(curve.tolist())
#create_show_p_curve()
# In[12]:
'''
# Non-planar curves generation - Cube Method
Divide a unit cube into 8 sub-cubes and pick a point randomly
from each sub-unit. Join the chosen points by spline
interpolation.
The area within each sub-unit can be chosen based on
the desired expanse of the curve.
'''
# Create Test NP Curve
def create_plot_new_np_curve():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# draw cube
r = [-1, 1]
for s, e in combinations(np.array(list(product(r, r, r))), 2):
if np.sum(np.abs(s-e)) == r[1]-r[0]:
ax.plot3D(*zip(s, e), color="b")
#plt.show()
boxes = [[(0.0, 1.0), (0.0, 1.0), (0.0, 1.0)],
[(-1.0, 0.0), (0.0, 1.0), (0.0, 1.0)],
[(0.0, 1.0), (-1.0, 0.0), (0.0, 1.0)],
[(0.0, 1.0), (0.0, 1.0), (-1.0, 0.0)],
[(-1.0, 0.0), (-1.0, 0.0), (-1.0, 0.0)],
[(0.0, 1.0), (-1.0, 0.0), (-1.0, 0.0)],
[(-1.0, 0.0), (-1.0, 0.0), (0.0, 1.0)],
[(-1.0, 0.0), (0.0, 1.0), (-1.0, 0.0)]]
import random as random
X_raw=[]
Y_raw=[]
Z_raw=[]
N=1
startTime = time.time()
for i in range(N):
X_raw=[]
Y_raw=[]
Z_raw=[]
for b in boxes:
x = random.uniform(b[0][0]/1, b[0][1]/1)
y = random.uniform(b[1][0]/1, b[1][1]/1)
z = random.uniform(b[2][0]/1, b[2][1]/1)
#print(x,y,z)
X_raw.append(x)
Y_raw.append(y)
Z_raw.append(z)
pts = get_3d_points(X_raw,Y_raw,Z_raw)
X, Y, Z = spline_generate(pts)
curve = add_curve_to_array(X, Y, Z)
ax.plot(X, Y, Z, 'b--')
ax.plot(X_raw, Y_raw, Z_raw, 'ro')
plt.show()
#create_plot_new_np_curve()
# In[38]:
# PCA analysis and plot
with open('np_curve_data_cube_method_1489680586.46.json') as infile:
c = json.load(infile)
n_planar_curves_array = np.asarray(c['non_planar_curves'])
with open('p_curve_data_cube_method_1489173944.8.json') as infile:
c = json.load(infile)
planar_curves_array = np.asarray(c['planar_curves'])
data = {}
data['planar_curves_error'] = []
data['non_planar_curves_error'] = []
import numpy as np
def pca_err(curves_array):
errors=[]
im = 0
for i in range(len(curves_array[:])):
X = curves_array[i].T
mu = np.mean(X, axis=0)
#print("X: ", X.shape)
#print("mu: ", mu)
pca = sklearn.decomposition.PCA()
pca.fit(X)
#ax1.plot(curves_array[i][0], curves_array[i][1], curves_array[i][2], 'ro')
nComp = 2
#print("Transfomed: ", pca.transform(X)[:,:nComp].shape)
#print("EV: ", pca.components_[:,:][:,:nComp])
transformed = pca.transform(X)[:,:nComp].T
if (im < 1):
fig = plt.figure()
fig.suptitle('Top Left - Original Curve | Top Right - PCA | Bottom Left - Reconstucted Curve', fontsize=10)
ax1 = fig.add_subplot(221, projection='3d')
ax2 = fig.add_subplot(222, projection='3d')
ax3 = fig.add_subplot(223, projection='3d')
ax1.plot(curves_array[0][0], curves_array[0][1], curves_array[0][2], 'ro')
ax2.plot(transformed[0], transformed[1], 'ro')
Xhat = np.dot(pca.transform(X)[:,:nComp], pca.components_[:nComp,:])
Xhat += mu
reconstructed_curve = Xhat.T
if (im < 1):
ax3.plot(reconstructed_curve[0], reconstructed_curve[1], reconstructed_curve[2], 'ro')
plt.show()
#print(Xhat.shape)
err = 0.5*sum((X-Xhat)**2)
errors.append(sum(err))
im = im+1
#print("Err: ", err)
return np.asarray(errors)
def plot_PCA_errors():
np_pca_err = pca_err(n_planar_curves_array)
p_pca_err = pca_err(planar_curves_array)
get_ipython().magic('matplotlib inline')
bins = np.linspace(0, 50, 50)
plt.hist(np_pca_err, bins, alpha=0.35, label='NPE')
plt.hist(p_pca_err, bins, alpha=0.35, label='PE')
plt.legend(loc='upper right')
plt.title('Reconstruction Errors Histogram')
plt.show()
#plot_PCA_errors()
# In[37]:
# PCA weights initialized auto-encoder
# In[20]:
#Non-Planar Errors
def ae_with_pca_wt_np_errors():
with open('planarity_errors_1490807988.67.json') as infile:
c = json.load(infile)
np_errors = np.asarray(c['non_planar_curves_error'])
p_errors = np.asarray(c['planar_curves_error'])
NPE = np.insert(np_errors, 1, 1, axis=2)
PE = np.insert(p_errors, 1, 0, axis=2)
X = np.concatenate((NPE, PE), axis=0)
X = X.reshape(200,2)
hist, bins = np.histogram(X[0:100,0], bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show()
# In[21]:
#Planar Errors
def ae_with_pca_wt_p_errors():
with open('planarity_errors_1490807988.67.json') as infile:
c = json.load(infile)
np_errors = np.asarray(c['non_planar_curves_error'])
p_errors = np.asarray(c['planar_curves_error'])
NPE = np.insert(np_errors, 1, 1, axis=2)
PE = np.insert(p_errors, 1, 0, axis=2)
X = np.concatenate((NPE, PE), axis=0)
X = X.reshape(200,2)
hist, bins = np.histogram(X[100:200,0], bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show()
# In[42]:
#Autoencoder
_debug_verbose = False
class AutoEncoder(object):
def __init__(self, arch):
self.num_layers = len(arch)
self.input_layer_size = arch[0]
self.output_layer_size = arch[-1]
self.num_hidden_layers = len(arch)-2
self.costs = []
self.weights = [np.random.randn(y, x)
for x, y in zip(arch[:-1], arch[1:])]
self.biases = [np.random.randn(y, 1) for y in arch[1:]]
def getParams(self):
#Get weights and biases unrolled into vector:
params = [(x.ravel(), y.ravel()) for x, y in zip(self.weights, self.biases)]
return params
def forward(self, X):
for b, w in zip(self.biases, self.weights):
if (_debug_verbose): print("weights: ", w)
if (_debug_verbose): print("biases: ", b)
if (_debug_verbose): print("inputs :", X)
if (_debug_verbose): print("dot product :", np.dot(w, X))
#print("matrix dot product :", w.dot(X))
X = self.unit_step(np.dot(w, X) + b)
if (_debug_verbose): print("result :", X)
return X.reshape(3,1)
def unit_step(self, z):
#return (lambda x: 0 if (x) < 0 else 1, z)[1]
return z
def unit_step_prime(self, z):
return (1)
def cost_function(self, X):
self.yHat = self.forward(X)
if (_debug_verbose): print ("squared error of X:{0} - Xhat:{1} is {2} & sum is {3}\n".format(X, self.yHat, ((X-self.yHat)**2), sum((X-self.yHat)**2)))
J = 0.5*sum((X-self.yHat)**2)
#self.costs.append(J)
return J
def cost_derivative(self, output_activations, y):
return (output_activations-y)
def cost_function_by_epoch(self, test_data, n_test):
y_hat = [(self.forward(y)) for (y) in test_data[0:n_test]]
y = [(y) for (y) in test_data[0:n_test]]
#print([float(a[0][0]) for a in y])
np.seterr( over='ignore' )
#costs = []
costs = [0.5*((a - b)**2) for a, b in zip(y, y_hat)]
#costs.append([max(math.sqrt(0.5*(round(a[0][0],2) - round(b[0][0],2))**2),1000) for a, b in zip(y, y_hat)])
#costs.append([0.5*math.sqrt((float(a[1][0]) - float(b[1][0]))**2) for a, b in zip(y, y_hat)])
#costs.append([0.5*math.sqrt((float(a[2][0]) - float(b[2][0]))**2) for a, b in zip(y, y_hat)])
self.costs.append(sum(costs)) #/n_test)
#self.costs.append(sum(costs[:][:]))
#self.costs.append([sum(costs[0]),sum(costs[1]),sum(costs[2])])
if (_debug_verbose): print ("Total Cost {1} for Epoch {0} complete".format(len(self.costs), sum(self.costs[-1])))
if (_debug_verbose): print ("Axis-wise Cost is {0} ".format((self.costs[-1])))
return self.costs[-1]
def GD(self, training_data, epochs, learning_rate, test_data=None):
"""Train the neural network using batch-wise
gradient descent. If ``test_data`` is provided then the
network will be evaluated against the test data after each
epoch, and partial progress printed out."""
if test_data: n_test = len(test_data)
n = len(training_data)
for j in range(epochs):
np.random.shuffle(training_data)
self.process_batch(training_data, learning_rate)
if test_data:
result = self.evaluate(test_data, n_test)
if (_debug_verbose): print ("Epoch {0}: Score {1} / {2}".format(j, result, n_test))
else:
if (_debug_verbose): print ("Epoch {0} complete".format(j))
def process_batch(self, batch, learning_rate):
"""Update the network's weights by applying
gradient descent using backpropagation to a single batch.
"""
base_w = [np.zeros(w.shape) for w in self.weights]
base_b = [np.zeros(b.shape) for b in self.biases]
count=0
for x in batch:
delta_error_b , delta_error_w = self.backprop(x)
updated_b = [nb+dnb for nb, dnb in zip(base_b, delta_error_b)]
updated_w = [nw+dnw for nw, dnw in zip(base_w, delta_error_w)]
count=count+1
#print ("Process {0} inputs backprop ".format(count))
eta=learning_rate
self.weights = [w-(eta/len(batch))*nw
for w, nw in zip(self.weights, updated_w)]
self.biases = [b-(eta/len(batch))*nb
for b, nb in zip(self.biases, updated_b)]
def backprop(self, x):
"""Return ``( delta_w)`` representing the
gradient for the cost function C_x. """
if (_debug_verbose): print ("input: ", x)
delta_w = [np.zeros(w.shape) for w in self.weights]
delta_b = [np.zeros(b.shape) for b in self.biases]
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the activation (z) vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation) + b
zs.append(z)
activation = self.unit_step(z)
activations.append(activation)
if (_debug_verbose): print ("activations: ", activations)
# backward pass
delta = self.cost_derivative(activations[-1], x) * self.unit_step_prime(zs[-1])
delta_b[-1] = delta
delta_w[-1] = np.dot(delta, activations[-2].transpose())
if (_debug_verbose): print ("cost derivative: ", self.cost_derivative(activations[-1], x))
if (_debug_verbose): print ("unit step: ", self.unit_step_prime(zs[-1]))
if (_debug_verbose): print("delta: ",delta)
for l in range(2, self.num_layers):
z = zs[-l]
step1 = np.dot(self.weights[-l+1].transpose(), delta)
delta = step1 * z
delta_b[-l] = delta
delta_w[-l] = np.dot(delta, activations[-l-1].transpose())
if (_debug_verbose): print ("delta b updated: ", delta_b)
if (_debug_verbose): print ("delta w updated:", delta_w)
#print ("delta b: ", delta_b)
#print ("delta w:", delta_w)
return (delta_b, delta_w)
def evaluate(self, test_data, n_test):
"""Return the number of test inputs for which the neural
network outputs the correct result. Note that the neural
network's output is assumed to be the index of whichever
neuron in the final layer has the highest activation."""
self.cost_function_by_epoch(test_data, n_test)
test_results = [self.forward(x)
for (x) in test_data]
return sum(((x) - (x_hat))**2 for (x, x_hat) in zip(test_data, test_results))/n_test
def reconstruct(self, inputs):
return [self.forward(x) for (x) in inputs]
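# A minimal usage sketch for the AutoEncoder above (defined but not called).
# The 3-2-3 architecture, epoch count, and learning rate are illustrative
# assumptions; each training point is reshaped to (3, 1) to match forward().
def demo_autoencoder_usage(curve):
    # curve: array of shape (3, n_points), e.g. one spline from the JSON files above
    points = [p.reshape(3, 1) for p in curve.T]
    ae = AutoEncoder([3, 2, 3])
    ae.GD(points, epochs=10, learning_rate=0.01, test_data=points)
    # per-point reconstructions and the last recorded epoch cost
    return ae.reconstruct(points), ae.costs[-1]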
# In[19]:
def rdm_wt_ae_errors():
import numpy as np
import matplotlib.pyplot as plt
with open('planarity_errors_1489714415.76.json') as infile:
c = json.load(infile)
np_errors = np.asarray(c['non_planar_curves_error'])
p_errors = np.asarray(c['planar_curves_error'])
# clean data
NPE = np.insert(np_errors, 1, 1, axis=2)
PE = np.insert(p_errors, 1, 0, axis=2)
X = np.concatenate((NPE, PE), axis=0)
X = X.reshape(200,2)
nan_idx = [i for i, x in enumerate(X) if (math.isnan(x[0]) == True)]
print(nan_idx)
X_cleaned = np.delete(X, nan_idx, axis=0)
X_cleaned.shape
bins = np.linspace(0, 100, 100)
plt.hist(X_cleaned[0:100,0], bins, alpha=0.25, label='NPE')
plt.hist(X_cleaned[100:198,0], bins, alpha=0.25, label='PE')
plt.legend(loc='upper right')
plt.show()
# In[32]:
def rdm_p_errors():
# planar curves
import numpy as np
import matplotlib.pyplot as plt
with open('planarity_errors_1488999893.39.json') as infile:
c = json.load(infile)
np_errors = np.asarray(c['non_planar_curves_error'])
p_errors = np.asarray(c['planar_curves_error'])
# clean data
NPE = np.insert(np_errors, 1, 1, axis=2)
PE = np.insert(p_errors, 1, 0, axis=2)
X = np.concatenate((NPE, PE), axis=0)
X = X.reshape(200,2)
nan_idx = [i for i, x in enumerate(X) if (math.isnan(x[0]) == True)]
print(nan_idx)
X_cleaned = np.delete(X, nan_idx, axis=0)
X_cleaned.shape
hist, bins = np.histogram(X_cleaned[100:197,0], bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show()
def rdm_np_errors():
# planar curves
import numpy as np
import matplotlib.pyplot as plt
with open('planarity_errors_1488999893.39.json') as infile:
c = json.load(infile)
np_errors = np.asarray(c['non_planar_curves_error'])
p_errors = np.asarray(c['planar_curves_error'])
# clean data
NPE = np.insert(np_errors, 1, 1, axis=2)
PE = np.insert(p_errors, 1, 0, axis=2)
X = np.concatenate((NPE, PE), axis=0)
X = X.reshape(200,2)
nan_idx = [i for i, x in enumerate(X) if (math.isnan(x[0]) == True)]
print(nan_idx)
X_cleaned = np.delete(X, nan_idx, axis=0)
X_cleaned.shape
hist, bins = np.histogram(X_cleaned[0:100,0], bins=70)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show()
# In[36]:
# non-planar curves
#hist, bins = np.histogram(X_cleaned[100:199,0], bins=50)
#width = 0.7 * (bins[1] - bins[0])
#center = (bins[:-1] + bins[1:]) / 2
#plt.bar(center, hist, align='center', width=width)
#plt.show()
| mit |
akhuia/Capstone_SeaFlow | Code/Step 2 - For 50 files/Step_PCA_New.py | 1 | 1871 | import glob
import os.path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pprint
def CreateList(Locale):
ListOfFiles = []
for name in glob.glob(Locale):
ListOfFiles.append(name)
return ListOfFiles
def Run_PCA(f):
FigFile = f[:-3]+"png"
if FigFile.find("Tok") == -1:
col = "blue"
else:
col = "red"
source = pd.read_csv(f)
newsource = source - source.mean()
datacov = newsource.cov()
eig_val_cov, eig_vec_cov = np.linalg.eig(datacov)
eig_pairs = [(np.abs(eig_val_cov[i]), eig_vec_cov[:,i]) for i in range(len(eig_val_cov))]
eig_pairs.sort()
eig_pairs.reverse()
matrix_w = np.hstack((eig_pairs[0][1].reshape(3,1), eig_pairs[1][1].reshape(3,1)))
transformed = newsource.as_matrix().dot(matrix_w)
plt.plot(transformed[0:len(source),0],transformed[0:len(source),1],\
'o', markersize=7, color=col, alpha=0.5, label='class1')
return ((eig_pairs[0]+eig_pairs[1]), f[f.find("armb"):])
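# Minimal sketch (not part of the original script) of the projection step
# performed by Run_PCA above: center the data, eigendecompose its covariance
# matrix and project onto the two leading eigenvectors. The random input and
# the helper name are assumptions made only for this illustration; nothing
# in the script calls it.
def _pca_projection_sketch(n_samples=50):
    data = pd.DataFrame(np.random.randn(n_samples, 3), columns=list("xyz"))
    centered = data - data.mean()
    eig_val, eig_vec = np.linalg.eig(centered.cov())
    order = np.argsort(eig_val)[::-1]  # largest eigenvalues first
    matrix_w = eig_vec[:, order[:2]]  # 3 x 2 projection matrix
    return centered.values.dot(matrix_w)  # n_samples x 2 scores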
def main():
Images = []
MatrixDict = {}
MatrixDict['File'] = []
MatrixDict['Eigenvalue1'] = []
MatrixDict['Eigenvector1'] = []
MatrixDict['Eigenvalue2'] = []
MatrixDict['Eigenvector2'] = []
path = "C:\Users\NYU\SeaFlow2\*"
FileList = CreateList(path)
for i in FileList:
Images.append(Run_PCA(i))
for i,j in enumerate(Images):
MatrixDict['File'].append(Images[i][1])
MatrixDict['Eigenvalue1'].append(Images[i][0][0])
MatrixDict['Eigenvector1'].append(Images[i][0][1])
MatrixDict['Eigenvalue2'].append(Images[i][0][2])
MatrixDict['Eigenvector2'].append(Images[i][0][3])
FinalMatrix = pd.DataFrame(MatrixDict, columns=['File','Eigenvalue1','Eigenvalue2','Eigenvector1','Eigenvector2'])
print FinalMatrix
if __name__ == "__main__":
main()
| mit |
mne-tools/mne-tools.github.io | 0.13/_downloads/plot_mne_dspm_source_localization.py | 4 | 5081 | """
.. _tut_inverse_mne_dspm:
Source localization with MNE/dSPM/sLORETA
=========================================
The aim of this tutorial is to teach you how to compute and apply a linear
inverse method such as MNE/dSPM/sLORETA on evoked/raw/epochs data.
"""
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import (make_inverse_operator, apply_inverse,
write_inverse_operator)
###############################################################################
# Process MEG data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, add_eeg_ref=False)
raw.set_eeg_reference() # set EEG average reference
events = mne.find_events(raw, stim_channel='STI 014')
event_id = dict(aud_r=1) # event trigger and conditions
tmin = -0.2 # start of each epoch (200ms before the trigger)
tmax = 0.5 # end of each epoch (500ms after the trigger)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
exclude='bads')
baseline = (None, 0) # means from the first instant to t = 0
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=baseline, reject=reject, add_eeg_ref=False)
###############################################################################
# Compute regularized noise covariance
# ------------------------------------
#
# For more details see :ref:`tut_compute_covariance`.
noise_cov = mne.compute_covariance(
epochs, tmax=0., method=['shrunk', 'empirical'])
fig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info)
###############################################################################
# Compute the evoked response
# ---------------------------
evoked = epochs.average()
evoked.plot()
evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag')
# Show whitening
evoked.plot_white(noise_cov)
###############################################################################
# Inverse modeling: MNE/dSPM on evoked and raw data
# -------------------------------------------------
# Read the forward solution and compute the inverse operator
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fwd = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Restrict forward solution as necessary for MEG
fwd = mne.pick_types_forward(fwd, meg=True, eeg=False)
# make an MEG inverse operator
info = evoked.info
inverse_operator = make_inverse_operator(info, fwd, noise_cov,
loose=0.2, depth=0.8)
write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',
inverse_operator)
###############################################################################
# Compute inverse solution
# ------------------------
method = "dSPM"
snr = 3.
lambda2 = 1. / snr ** 2
stc = apply_inverse(evoked, inverse_operator, lambda2,
method=method, pick_ori=None)
del fwd, inverse_operator, epochs # to save memory
###############################################################################
# Visualization
# -------------
# View activation time-series
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
###############################################################################
# Here we use peak getter to move visualization to the time point of the peak
# and draw a marker at the maximum peak vertex.
vertno_max, time_max = stc.get_peak(hemi='rh')
subjects_dir = data_path + '/subjects'
brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir,
clim=dict(kind='value', lims=[8, 12, 15]),
initial_time=time_max, time_unit='s')
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
scale_factor=0.6)
brain.show_view('lateral')
###############################################################################
# Morph data to average brain
# ---------------------------
fs_vertices = [np.arange(10242)] * 2
morph_mat = mne.compute_morph_matrix('sample', 'fsaverage', stc.vertices,
fs_vertices, smooth=None,
subjects_dir=subjects_dir)
stc_fsaverage = stc.morph_precomputed('fsaverage', fs_vertices, morph_mat)
brain_fsaverage = stc_fsaverage.plot(surface='inflated', hemi='rh',
subjects_dir=subjects_dir,
clim=dict(kind='value', lims=[8, 12, 15]),
initial_time=time_max, time_unit='s')
brain_fsaverage.show_view('lateral')
###############################################################################
# Exercise
# --------
# - By changing the method parameter to 'sLORETA', recompute the source
# estimates using the sLORETA method.
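# One possible solution sketch for the exercise above (not part of the
# original tutorial). The inverse operator was deleted earlier to save
# memory, so it is re-read from the file written above before being applied
# again with method='sLORETA'.
from mne.minimum_norm import read_inverse_operator
inverse_operator = read_inverse_operator('sample_audvis-meg-oct-6-inv.fif')
stc_sloreta = apply_inverse(evoked, inverse_operator, lambda2,
                            method='sLORETA', pick_ori=None)
plt.plot(1e3 * stc_sloreta.times, stc_sloreta.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('sLORETA value')
plt.show()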
| bsd-3-clause |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/metrics/pairwise.py | 7 | 47000 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
from functools import partial
import warnings
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
warn_on_dtype = dtype is not None
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
Y = check_array(Y, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
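# Illustrative sketch (not part of scikit-learn): the expansion used above,
# ||x - y||^2 = <x, x> - 2 <x, y> + <y, y>, can be checked against scipy's
# reference implementation on a small random example. The helper below is a
# demonstration only and is never called by this module.
def _euclidean_expansion_sketch(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.rand(5, 3)
    Y = rng.rand(7, 3)
    via_expansion = euclidean_distances(X, Y)
    via_cdist = distance.cdist(X, Y)  # scipy.spatial.distance, imported above
    return np.allclose(via_expansion, via_cdist)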
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples1, n_features)
Array containing points.
Y : {array-like, sparse matrix}, shape (n_samples2, n_features)
Arrays containing points.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=None):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
if size_threshold is not None:
warnings.warn('Use of the "size_threshold" is deprecated '
'in 0.19 and it will be removed version '
'0.21 of scikit-learn', DeprecationWarning)
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
np.clip(S, 0, 2, out=S)
if X is Y or Y is None:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
S[np.diag_indices_from(S)] = 0.0
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
    -----
    The cosine distance is equivalent to half the squared euclidean
    distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
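# Illustrative sketch (not part of scikit-learn) of the note above: once each
# sample is L2-normalised, the cosine distance equals half the squared
# euclidean distance. Demonstration helper only, never called by this module.
def _paired_cosine_identity_sketch(seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.rand(4, 3), rng.rand(4, 3)
    cosine = paired_cosine_distances(X, Y)
    halved = 0.5 * paired_euclidean_distances(normalize(X), normalize(Y)) ** 2
    return np.allclose(cosine, halved)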
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
if None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
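# Illustrative sketch (not part of scikit-learn): rbf_kernel above is simply
# exp(-gamma * squared euclidean distance), which can be cross-checked with
# scipy on random data. Demonstration helper only, never called here.
def _rbf_kernel_sketch(seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.rand(3, 4), rng.rand(5, 4)
    direct = np.exp(-0.5 * distance.cdist(X, Y, 'sqeuclidean'))
    return np.allclose(direct, rbf_kernel(X, Y, gamma=0.5))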
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter ``dense_output`` for dense output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
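# Illustrative sketch (not part of scikit-learn): on L2-normalised rows the
# cosine similarity reduces to a plain dot product and therefore coincides
# with linear_kernel, as the docstring above states. Demonstration only.
def _cosine_vs_linear_sketch(seed=0):
    rng = np.random.RandomState(seed)
    X = normalize(rng.rand(5, 4))
    return np.allclose(cosine_similarity(X), linear_kernel(X))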
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
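# Usage sketch (not part of scikit-learn): pairwise_distances also accepts a
# user-supplied callable, which is evaluated on every pair of rows. The
# Chebyshev metric below is an arbitrary choice made only for illustration,
# and the helper is never called by this module.
def _pairwise_callable_usage_sketch(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.rand(4, 3)
    chebyshev = lambda u, v: np.max(np.abs(u - v))
    D = pairwise_distances(X, metric=chebyshev)
    return np.allclose(D, pairwise_distances(X, metric='chebyshev'))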
# These distances require boolean arrays when using scipy.spatial.distance
PAIRWISE_BOOLEAN_FUNCTIONS = [
'dice',
'jaccard',
'kulsinski',
'matching',
'rogerstanimoto',
'russellrao',
'sokalmichener',
'sokalsneath',
'yule',
]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernels.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
    The valid kernels, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": frozenset(["gamma"]),
"cosine": (),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
filter_params : boolean
Whether to filter invalid parameters or not.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
**kwds : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| mit |
toobaz/pandas | pandas/tests/indexing/test_loc.py | 1 | 33581 | """ test label based indexing with loc """
from io import StringIO
import re
from warnings import catch_warnings, filterwarnings
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range
from pandas.api.types import is_scalar
from pandas.tests.indexing.common import Base
from pandas.util import testing as tm
class TestLoc(Base):
def test_loc_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning a ndarray
df = DataFrame(
np.random.random_sample((20, 5)), index=["ABCDE"[x % 5] for x in range(20)]
)
expected = df.loc["A", 0]
result = df.loc[:, 0].loc["A"]
tm.assert_series_equal(result, expected)
def test_loc_getitem_dups2(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame(
[[1, 2, "foo", "bar", Timestamp("20130101")]],
columns=["a", "a", "a", "a", "a"],
index=[1],
)
expected = Series(
[1, 2, "foo", "bar", Timestamp("20130101")],
index=["a", "a", "a", "a", "a"],
name=1,
)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{
"me": list("rttti"),
"foo": list("aaade"),
"bar": np.arange(5, dtype="float64") * 1.34 + 2,
"bar2": np.arange(5, dtype="float64") * -0.34 + 2,
}
).set_index("me")
indexer = tuple(["r", ["bar", "bar2"]])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(["r", "bar"])
df = df_orig.copy()
df.loc[indexer] *= 2.0
assert df.loc[indexer] == 2.0 * df_orig.loc[indexer]
indexer = tuple(["t", ["bar", "bar2"]])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({"a": [0, 1, 1], "b": Series([100, 200, 300], dtype="uint32")})
ix = df1["a"] == 1
newb1 = df1.loc[ix, "b"] + 1
df1.loc[ix, "b"] = newb1
expected = DataFrame(
{"a": [0, 1, 1], "b": Series([100, 201, 301], dtype="uint32")}
)
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({"a": [0, 1, 1], "b": [100, 200, 300]}, dtype="uint64")
ix = df1["a"] == 1
newb2 = df2.loc[ix, "b"]
df1.loc[ix, "b"] = newb2
expected = DataFrame({"a": [0, 1, 1], "b": [100, 200, 300]}, dtype="uint64")
tm.assert_frame_equal(df2, expected)
def test_loc_getitem_int(self):
# int label
self.check_result(
"int label", "loc", 2, "ix", 2, typs=["ints", "uints"], axes=0
)
self.check_result(
"int label", "loc", 3, "ix", 3, typs=["ints", "uints"], axes=1
)
self.check_result(
"int label", "loc", 2, "ix", 2, typs=["label"], fails=KeyError
)
def test_loc_getitem_label(self):
# label
self.check_result("label", "loc", "c", "ix", "c", typs=["labels"], axes=0)
self.check_result("label", "loc", "null", "ix", "null", typs=["mixed"], axes=0)
self.check_result("label", "loc", 8, "ix", 8, typs=["mixed"], axes=0)
self.check_result(
"label", "loc", Timestamp("20130102"), "ix", 1, typs=["ts"], axes=0
)
self.check_result(
"label", "loc", "c", "ix", "c", typs=["empty"], fails=KeyError
)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result(
"label range",
"loc",
"f",
"ix",
"f",
typs=["ints", "uints", "labels", "mixed", "ts"],
fails=KeyError,
)
self.check_result(
"label range", "loc", "f", "ix", "f", typs=["floats"], fails=KeyError
)
self.check_result(
"label range",
"loc",
20,
"ix",
20,
typs=["ints", "uints", "mixed"],
fails=KeyError,
)
self.check_result(
"label range", "loc", 20, "ix", 20, typs=["labels"], fails=TypeError
)
self.check_result(
"label range", "loc", 20, "ix", 20, typs=["ts"], axes=0, fails=TypeError
)
self.check_result(
"label range", "loc", 20, "ix", 20, typs=["floats"], axes=0, fails=KeyError
)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result(
"list lbl",
"loc",
[0, 2, 4],
"ix",
[0, 2, 4],
typs=["ints", "uints"],
axes=0,
)
self.check_result(
"list lbl",
"loc",
[3, 6, 9],
"ix",
[3, 6, 9],
typs=["ints", "uints"],
axes=1,
)
self.check_result(
"list lbl",
"loc",
["a", "b", "d"],
"ix",
["a", "b", "d"],
typs=["labels"],
axes=0,
)
self.check_result(
"list lbl",
"loc",
["A", "B", "C"],
"ix",
["A", "B", "C"],
typs=["labels"],
axes=1,
)
self.check_result(
"list lbl",
"loc",
[2, 8, "null"],
"ix",
[2, 8, "null"],
typs=["mixed"],
axes=0,
)
self.check_result(
"list lbl",
"loc",
[Timestamp("20130102"), Timestamp("20130103")],
"ix",
[Timestamp("20130102"), Timestamp("20130103")],
typs=["ts"],
axes=0,
)
def test_loc_getitem_label_list_with_missing(self):
self.check_result(
"list lbl",
"loc",
[0, 1, 2],
"indexer",
[0, 1, 2],
typs=["empty"],
fails=KeyError,
)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.check_result(
"list lbl",
"loc",
[0, 2, 10],
"ix",
[0, 2, 10],
typs=["ints", "uints", "floats"],
axes=0,
fails=KeyError,
)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.check_result(
"list lbl",
"loc",
[3, 6, 7],
"ix",
[3, 6, 7],
typs=["ints", "uints", "floats"],
axes=1,
fails=KeyError,
)
# GH 17758 - MultiIndex and missing keys
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.check_result(
"list lbl",
"loc",
[(1, 3), (1, 4), (2, 5)],
"ix",
[(1, 3), (1, 4), (2, 5)],
typs=["multi"],
axes=0,
)
def test_getitem_label_list_with_missing(self):
s = Series(range(3), index=["a", "b", "c"])
# consistency
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s[["a", "d"]]
s = Series(range(3))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s[[0, 3]]
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result(
"list lbl",
"loc",
[20, 30, 40],
"ix",
[20, 30, 40],
typs=["ints", "uints"],
axes=1,
fails=KeyError,
)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result(
"array like",
"loc",
Series(index=[0, 2, 4]).index,
"ix",
[0, 2, 4],
typs=["ints", "uints"],
axes=0,
)
self.check_result(
"array like",
"loc",
Series(index=[3, 6, 9]).index,
"ix",
[3, 6, 9],
typs=["ints", "uints"],
axes=1,
)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result(
"bool",
"loc",
b,
"ix",
b,
typs=["ints", "uints", "labels", "mixed", "ts", "floats"],
)
self.check_result("bool", "loc", b, "ix", b, typs=["empty"], fails=IndexError)
@pytest.mark.parametrize("index", [[True, False], [True, False, True, False]])
def test_loc_getitem_bool_diff_len(self, index):
# GH26658
s = Series([1, 2, 3])
with pytest.raises(
IndexError,
match=("Item wrong length {} instead of {}.".format(len(index), len(s))),
):
_ = s.loc[index]
def test_loc_getitem_int_slice(self):
# ok
self.check_result(
"int slice2",
"loc",
slice(2, 4),
"ix",
[2, 4],
typs=["ints", "uints"],
axes=0,
)
self.check_result(
"int slice2",
"loc",
slice(3, 6),
"ix",
[3, 6],
typs=["ints", "uints"],
axes=1,
)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(
np.random.random((3, 3)), index=["a", "b", "c"], columns=["e", "f", "g"]
)
# raise a KeyError?
msg = (
r"\"None of \[Int64Index\(\[1, 2\], dtype='int64'\)\] are"
r" in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
df.loc[[1, 2], [1, 2]]
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc["a"] = 2
with pytest.raises(KeyError, match=r"^-1$"):
s.loc[-1]
msg = (
r"\"None of \[Int64Index\(\[-1, -2\], dtype='int64'\)\] are"
r" in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
s.loc[[-1, -2]]
msg = (
r"\"None of \[Index\(\['4'\], dtype='object'\)\] are" r" in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
s.loc[["4"]]
s.loc[-1] = 3
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s["a"] = 2
msg = (
r"\"None of \[Int64Index\(\[-2\], dtype='int64'\)\] are"
r" in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
s.loc[[-2]]
del s["a"]
with pytest.raises(KeyError, match=msg):
s.loc[[-2]] = 0
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([["a"], ["b"]], index=[1, 2], columns=["value"])
msg = (
r"\"None of \[Int64Index\(\[3\], dtype='int64'\)\] are"
r" in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
df.loc[[3], :]
with pytest.raises(KeyError, match=msg):
df.loc[[3]]
def test_loc_getitem_list_with_fail(self):
# 15747
# should KeyError if *any* missing labels
s = Series([1, 2, 3])
s.loc[[2]]
with pytest.raises(
KeyError,
match=re.escape(
"\"None of [Int64Index([3], dtype='int64')] are in the [index]\""
),
):
s.loc[[3]]
# a non-match and a match
with tm.assert_produces_warning(FutureWarning):
expected = s.loc[[2, 3]]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result(
"lab slice",
"loc",
slice(1, 3),
"ix",
slice(1, 3),
typs=["labels", "mixed", "empty", "ts", "floats"],
fails=TypeError,
)
# real label slices
self.check_result(
"lab slice",
"loc",
slice("a", "c"),
"ix",
slice("a", "c"),
typs=["labels"],
axes=0,
)
self.check_result(
"lab slice",
"loc",
slice("A", "C"),
"ix",
slice("A", "C"),
typs=["labels"],
axes=1,
)
self.check_result(
"ts slice",
"loc",
slice("20130102", "20130104"),
"ix",
slice("20130102", "20130104"),
typs=["ts"],
axes=0,
)
self.check_result(
"ts slice",
"loc",
slice("20130102", "20130104"),
"ix",
slice("20130102", "20130104"),
typs=["ts"],
axes=1,
fails=TypeError,
)
# GH 14316
self.check_result(
"ts slice rev",
"loc",
slice("20130104", "20130102"),
"indexer",
[0, 1, 2],
typs=["ts_rev"],
axes=0,
)
self.check_result(
"mixed slice",
"loc",
slice(2, 8),
"ix",
slice(2, 8),
typs=["mixed"],
axes=0,
fails=TypeError,
)
self.check_result(
"mixed slice",
"loc",
slice(2, 8),
"ix",
slice(2, 8),
typs=["mixed"],
axes=1,
fails=KeyError,
)
self.check_result(
"mixed slice",
"loc",
slice(2, 4, 2),
"ix",
slice(2, 4, 2),
typs=["mixed"],
axes=0,
fails=TypeError,
)
def test_loc_index(self):
# gh-17131
# a boolean index should index like a boolean numpy array
df = DataFrame(
np.random.random(size=(5, 10)),
index=["alpha_0", "alpha_1", "alpha_2", "beta_0", "beta_1"],
)
mask = df.index.map(lambda x: "alpha" in x)
expected = df.loc[np.array(mask)]
result = df.loc[mask]
tm.assert_frame_equal(result, expected)
result = df.loc[mask.values]
tm.assert_frame_equal(result, expected)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4),
columns=["A", "B", "C", "D"],
index=["A", "B", "C", "D"],
)
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
assert (result.columns == ["A", "B"]).all()
assert (result.index == ["A", "B"]).all()
# mixed type
result = DataFrame({"a": [Timestamp("20130101")], "b": [1]}).iloc[0]
expected = Series([Timestamp("20130101"), 1], index=["a", "b"], name=0)
tm.assert_series_equal(result, expected)
assert result.dtype == object
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame(
{
"date": Series(0, index=range(5), dtype=np.int64),
"val": Series(range(5), dtype=np.int64),
}
)
df = DataFrame(
{
"date": date_range("2000-01-01", "2000-01-5"),
"val": Series(range(5), dtype=np.int64),
}
)
df.loc[:, "date"] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame(
{
"date": date_range("2000-01-01", "2000-01-5"),
"val": Series(range(5), dtype=np.int64),
}
)
df.loc[:, "date"] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame(
{
"date": date_range("2000-01-01", "2000-01-5"),
"val": Series(range(5), dtype=np.int64),
}
)
df.loc[:, "date"] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame(
{
"date": Series("foo", index=range(5)),
"val": Series(range(5), dtype=np.int64),
}
)
df = DataFrame(
{
"date": date_range("2000-01-01", "2000-01-5"),
"val": Series(range(5), dtype=np.int64),
}
)
df.loc[:, "date"] = "foo"
tm.assert_frame_equal(df, expected)
expected = DataFrame(
{
"date": Series(1.0, index=range(5)),
"val": Series(range(5), dtype=np.int64),
}
)
df = DataFrame(
{
"date": date_range("2000-01-01", "2000-01-5"),
"val": Series(range(5), dtype=np.int64),
}
)
df.loc[:, "date"] = 1.0
tm.assert_frame_equal(df, expected)
# GH 15494
# setting on frame with single row
df = DataFrame({"date": Series([Timestamp("20180101")])})
df.loc[:, "date"] = "string"
expected = DataFrame({"date": Series(["string"])})
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=["x", "y"])
expected["x"] = expected["x"].astype(np.int64)
df = DataFrame(columns=["x", "y"])
df.loc[:, "x"] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=["x", "y"])
df["x"] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ("Respondent", "StartDate")] = pd.to_datetime(
df.loc[:, ("Respondent", "StartDate")]
)
df.loc[:, ("Respondent", "EndDate")] = pd.to_datetime(
df.loc[:, ("Respondent", "EndDate")]
)
df.loc[:, ("Respondent", "Duration")] = (
df.loc[:, ("Respondent", "EndDate")]
- df.loc[:, ("Respondent", "StartDate")]
)
df.loc[:, ("Respondent", "Duration")] = df.loc[
:, ("Respondent", "Duration")
].astype("timedelta64[s]")
expected = Series(
[1380, 720, 840, 2160.0], index=df.index, name=("Respondent", "Duration")
)
tm.assert_series_equal(df[("Respondent", "Duration")], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc["a", "A"] = 1
result = df.loc["a", "A"]
assert result == 1
result = df.iloc[0, 0]
assert result == 1
df.loc[:, "B":"D"] = 0
expected = df.loc[:, "B":"D"]
result = df.iloc[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=["A"])
df.loc[[4, 3, 5], "A"] = np.array([1, 2, 3], dtype="int64")
expected = DataFrame(dict(A=Series([1, 2, 3], index=[4, 3, 5]))).reindex(
index=[3, 5, 4]
)
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ["@" + str(i) for i in range(5)]
val1 = np.arange(5, dtype="int64")
keys2 = ["@" + str(i) for i in range(4)]
val2 = np.arange(4, dtype="int64")
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df["A"] = np.nan
df.loc[keys1, "A"] = val1
df["B"] = np.nan
df.loc[keys2, "B"] = val2
expected = DataFrame(
dict(A=Series(val1, index=keys1), B=Series(val2, index=keys2))
).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({"A": [1, 2, 3], "B": np.nan})
df.loc[df.B > df.A, "B"] = df.A
expected = DataFrame({"A": [1, 2, 3], "B": np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], "a": ["a", "b"]})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], "a": ["a", "b"]})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame(
{"A": ["foo", "bar", "baz"], "B": Series(range(3), dtype=np.int64)}
)
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame(
{"A": ["bar", "baz", "baz"], "B": Series([1, 2, 2], dtype=np.int64)}
)
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame(
{
"date": date_range("2000-01-01", "2000-01-5"),
"val": Series(range(5), dtype=np.int64),
}
)
expected = DataFrame(
{
"date": [
Timestamp("20000101"),
Timestamp("20000102"),
Timestamp("20000101"),
Timestamp("20000102"),
Timestamp("20000103"),
],
"val": Series([0, 1, 0, 1, 2], dtype=np.int64),
}
)
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"indexer", [["A"], slice(None, "A", None), np.array(["A"])]
)
@pytest.mark.parametrize("value", [["Z"], np.array(["Z"])])
def test_loc_setitem_with_scalar_index(self, indexer, value):
# GH #19474
# assigning like "df.loc[0, ['A']] = ['Z']" should be evaluated
        # element-wise, not using "setter('A', ['Z'])".
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
df.loc[0, indexer] = value
result = df.loc[0, "A"]
assert is_scalar(result) and result == "Z"
def test_loc_coercion(self):
# 12411
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_new_key_tz(self):
# GH#12862 should not raise on assigning the second value
vals = [
pd.to_datetime(42).tz_localize("UTC"),
pd.to_datetime(666).tz_localize("UTC"),
]
expected = pd.Series(vals, index=["foo", "bar"])
ser = pd.Series()
ser["foo"] = vals[0]
ser["bar"] = vals[1]
tm.assert_series_equal(ser, expected)
ser = pd.Series()
ser.loc["foo"] = vals[0]
ser.loc["bar"] = vals[1]
tm.assert_series_equal(ser, expected)
def test_loc_non_unique(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
        # these are going to raise because we are non-monotonic
df = DataFrame(
{"A": [1, 2, 3, 4, 5, 6], "B": [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3]
)
msg = "'Cannot get left slice bound for non-unique label: 1'"
with pytest.raises(KeyError, match=msg):
df.loc[1:]
msg = "'Cannot get left slice bound for non-unique label: 0'"
with pytest.raises(KeyError, match=msg):
df.loc[0:]
msg = "'Cannot get left slice bound for non-unique label: 1'"
with pytest.raises(KeyError, match=msg):
df.loc[1:2]
# monotonic are ok
df = DataFrame(
{"A": [1, 2, 3, 4, 5, 6], "B": [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3]
).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({"A": [2, 4, 5, 6], "B": [4, 6, 7, 8]}, index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({"A": [2, 4, 5], "B": [4, 6, 7]}, index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_non_unique_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list("ABCDEFG")
def gen_test(l, l2):
return pd.concat(
[
DataFrame(
np.random.randn(l, len(columns)),
index=np.arange(l),
columns=columns,
),
DataFrame(
np.ones((l2, len(columns))), index=[0] * l2, columns=columns
),
]
)
def gen_expected(df, mask):
len_mask = len(mask)
return pd.concat(
[
df.take([0]),
DataFrame(
np.ones((len_mask, len(columns))),
index=[0] * len_mask,
columns=columns,
),
df.take(mask[1:]),
]
)
df = gen_test(900, 100)
assert df.index.is_unique is False
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
assert df.index.is_unique is False
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = "index_name"
result = df.iloc[[0, 1]].index.name
assert result == "index_name"
with catch_warnings(record=True):
filterwarnings("ignore", "\\n.ix", FutureWarning)
result = df.ix[[0, 1]].index.name
assert result == "index_name"
result = df.loc[[0, 1]].index.name
assert result == "index_name"
def test_loc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(
df.loc[:, []], df.iloc[:, :0], check_index_type=True, check_column_type=True
)
# horizontal empty
tm.assert_frame_equal(
df.loc[[], :], df.iloc[:0, :], check_index_type=True, check_column_type=True
)
# horizontal empty
tm.assert_frame_equal(
df.loc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True
)
def test_identity_slice_returns_new_object(self):
# GH13873
original_df = DataFrame({"a": [1, 2, 3]})
sliced_df = original_df.loc[:]
assert sliced_df is not original_df
assert original_df[:] is not original_df
# should be a shallow copy
original_df["a"] = [4, 4, 4]
assert (sliced_df["a"] == 4).all()
# These should not return copies
assert original_df is original_df.loc[:, :]
df = DataFrame(np.random.randn(10, 4))
assert df[0] is df.loc[:, 0]
# Same tests for Series
original_series = Series([1, 2, 3, 4, 5, 6])
sliced_series = original_series.loc[:]
assert sliced_series is not original_series
assert original_series[:] is not original_series
original_series[:3] = [7, 8, 9]
assert all(sliced_series[:3] == [7, 8, 9])
def test_loc_uint64(self):
# GH20722
        # Test whether loc accepts the uint64 max value as an index.
s = pd.Series(
[1, 2], index=[np.iinfo("uint64").max - 1, np.iinfo("uint64").max]
)
result = s.loc[np.iinfo("uint64").max - 1]
expected = s.iloc[0]
assert result == expected
result = s.loc[[np.iinfo("uint64").max - 1]]
expected = s.iloc[[0]]
tm.assert_series_equal(result, expected)
result = s.loc[[np.iinfo("uint64").max - 1, np.iinfo("uint64").max]]
tm.assert_series_equal(result, s)
def test_loc_setitem_empty_append(self):
# GH6173, various appends to an empty dataframe
data = [1, 2, 3]
expected = DataFrame({"x": data, "y": [None] * len(data)})
# appends to fit length of data
df = DataFrame(columns=["x", "y"])
df.loc[:, "x"] = data
tm.assert_frame_equal(df, expected)
# only appends one value
expected = DataFrame({"x": [1.0], "y": [np.nan]})
df = DataFrame(columns=["x", "y"], dtype=np.float)
df.loc[0, "x"] = expected.loc[0, "x"]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_empty_append_raises(self):
# GH6173, various appends to an empty dataframe
data = [1, 2]
df = DataFrame(columns=["x", "y"])
msg = (
r"None of \[Int64Index\(\[0, 1\], dtype='int64'\)\] "
r"are in the \[index\]"
)
with pytest.raises(KeyError, match=msg):
df.loc[[0, 1], "x"] = data
msg = "cannot copy sequence with size 2 to array axis with dimension 0"
with pytest.raises(ValueError, match=msg):
df.loc[0:2, "x"] = data
def test_indexing_zerodim_np_array(self):
# GH24924
df = DataFrame([[1, 2], [3, 4]])
result = df.loc[np.array(0)]
s = pd.Series([1, 2], name=0)
tm.assert_series_equal(result, s)
def test_series_indexing_zerodim_np_array(self):
# GH24924
s = Series([1, 2])
result = s.loc[np.array(0)]
assert result == 1
def test_series_loc_getitem_label_list_missing_values():
# gh-11428
key = np.array(
["2001-01-04", "2001-01-02", "2001-01-04", "2001-01-14"], dtype="datetime64"
)
s = Series([2, 5, 8, 11], date_range("2001-01-01", freq="D", periods=4))
expected = Series([11.0, 5.0, 11.0, np.nan], index=key)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = s.loc[key]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"columns, column_key, expected_columns, check_column_type",
[
([2011, 2012, 2013], [2011, 2012], [0, 1], True),
([2011, 2012, "All"], [2011, 2012], [0, 1], False),
([2011, 2012, "All"], [2011, "All"], [0, 2], True),
],
)
def test_loc_getitem_label_list_integer_labels(
columns, column_key, expected_columns, check_column_type
):
# gh-14836
df = DataFrame(np.random.rand(3, 3), columns=columns, index=list("ABC"))
expected = df.iloc[:, expected_columns]
result = df.loc[["A", "B", "C"], column_key]
tm.assert_frame_equal(result, expected, check_column_type=check_column_type)
| bsd-3-clause |
nicococo/LatentSVDD | latentsvdd.py | 1 | 3231 | from cvxopt import matrix,spmatrix,sparse,uniform,normal,setseed
from cvxopt.blas import dot,dotu
from cvxopt.solvers import qp
from cvxopt.lapack import syev
import numpy as np
import math as math
from kernel import Kernel
from svdd import SVDD
import pylab as pl
import matplotlib.pyplot as plt
class LatentSVDD:
""" Latent variable support vector data description.
Written by Nico Goernitz, TU Berlin, 2014
For more information see:
'Learning and Evaluation with non-i.i.d Label Noise'
Goernitz et al., AISTATS & JMLR W&CP, 2014
"""
PRECISION = 10**-3 # important: effects the threshold, support vectors and speed!
C = 1.0 # (scalar) the regularization constant > 0
sobj = [] # structured object contains various functions
# i.e. get_num_dims(), get_num_samples(), get_sample(i), argmin(sol,i)
sol = [] # (vector) solution vector (after training, of course)
def __init__(self, sobj, C=1.0):
self.C = C
self.sobj = sobj
def train_dc(self, max_iter=200):
""" Solve the LatentSVDD optimization problem with a
sequential convex programming/DC-programming
approach:
Iteratively, find the most likely configuration of
the latent variables and then, optimize for the
model parameter using fixed latent states.
"""
N = self.sobj.get_num_samples()
DIMS = self.sobj.get_num_dims()
# intermediate solutions
# latent variables
latent = [0]*N
#sol = 1.0*uniform(DIMS,1)-0.5
sol = matrix(0.0, (DIMS,1))
psi = matrix(0.0, (DIMS,N)) # (dim x exm)
old_psi = matrix(0.0, (DIMS,N)) # (dim x exm)
threshold = 0
obj = -1
iter = 0
# terminate if objective function value doesn't change much
while iter<max_iter and (iter<3 or sum(sum(abs(np.array(psi-old_psi))))>=0.001):
print('Starting iteration {0}.'.format(iter))
print(sum(sum(abs(np.array(psi-old_psi)))))
iter += 1
old_psi = matrix(psi)
latent_old = list(latent)
# 1. linearize
# for the current solution compute the
# most likely latent variable configuration
for i in range(N):
                # min_z ||sol - Psi(x,z)||^2 = ||sol||^2 + min_z (-2<sol,Psi(x,z)> + ||Psi(x,z)||^2)
                # Hence => ||sol||^2 - max_z (2<sol,Psi(x,z)> - ||Psi(x,z)||^2)
(foo, latent[i], psi[:,i]) = self.sobj.argmax(sol, i)
# 2. solve the intermediate convex optimization problem
kernel = Kernel.get_kernel(psi,psi)
svdd = SVDD(kernel, self.C)
svdd.train_dual()
threshold = svdd.get_threshold()
inds = svdd.get_support_dual()
alphas = svdd.get_support_dual_values()
sol = psi[:,inds]*alphas
#print alphas
self.sol = sol
self.latent = latent
return (sol, latent, threshold)
def apply(self, pred_sobj):
""" Application of the LatentSVDD:
anomaly_score = min_z ||c*-\Psi(x,z)||^2
latent_state = argmin_z ||c*-\Psi(x,z)||^2
"""
N = pred_sobj.get_num_samples()
norm2 = self.sol.trans()*self.sol
vals = matrix(0.0, (1,N))
lats = matrix(0.0, (1,N))
for i in range(N):
            # min_z ||sol - Psi(x,z)||^2 = ||sol||^2 + min_z (-2<sol,Psi(x,z)> + ||Psi(x,z)||^2)
            # Hence => ||sol||^2 - max_z (2<sol,Psi(x,z)> - ||Psi(x,z)||^2)
(max_obj, lats[i], foo) = pred_sobj.argmax(self.sol, i)
vals[i] = -max_obj
return (vals, lats)
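# A minimal usage sketch (hypothetical): `toy_sobj` stands in for any
# structured object exposing get_num_samples(), get_num_dims() and
# argmax(sol, i), which is the interface assumed by train_dc() and apply().
#
#   lsvdd = LatentSVDD(toy_sobj, C=10.0)
#   (sol, latent, threshold) = lsvdd.train_dc(max_iter=50)
#   (scores, states) = lsvdd.apply(toy_test_sobj)
#   outliers = [i for i in range(scores.size[1]) if scores[0, i] > threshold]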
| mit |
MartinDelzant/scikit-learn | sklearn/ensemble/gradient_boosting.py | 50 | 67625 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._splitter import PresortBestSplitter
from ..tree._criterion import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
        ----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
        learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
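# Note: for the squared-error loss L(y, F) = (y - F)**2 the negative gradient
# with respect to F is 2 * (y - F); the constant factor is dropped, so each
# boosting stage fits its tree directly to the plain residual y - pred, which
# is what LeastSquaresError.negative_gradient returns.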
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
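# Note: with residual d = y - F and transition point gamma (the alpha-quantile
# of |d|), the Huber loss implemented above is
#     L(d) = 0.5 * d**2                    if |d| <= gamma
#     L(d) = gamma * (|d| - gamma / 2)     otherwise
# so its negative gradient is the residual clipped to [-gamma, gamma], which is
# exactly what HuberLossFunction.negative_gradient computes.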
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression allows estimating the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
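# Note: QuantileLossFunction implements the pinball loss for the
# alpha-quantile. With residual d = y - F,
#     L(d) = alpha * d           if d > 0
#     L(d) = (1 - alpha) * (-d)  otherwise
# and the negative gradient is alpha where y > F and -(1 - alpha) elsewhere,
# matching negative_gradient above.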
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        Subclasses that do not support probabilities should raise a TypeError.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
        we take advantage of the fact that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
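# Note: the Newton-Raphson leaf update in BinomialDeviance._update_terminal_region
# follows from the binomial log-likelihood. With p = sigmoid(F) and residual
# r = y - p, the leaf step is
#     sum(w * r) / sum(w * p * (1 - p))
# and because p = y - r, the denominator can be written as
#     sum(w * (y - r) * (1 - y + r))
# which is exactly the expression computed above.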
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
verbose_mod is zero).; if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
        # self.n_estimators is the total number of estimators after the resize
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
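    # A minimal sketch of a ``monitor`` callback as described in the ``fit``
    # docstring above (hypothetical names: early_stopping_monitor, X_train,
    # y_train; the five-stage stopping rule is only an example):
    #
    #   def early_stopping_monitor(i, est, locals_):
    #       if est.subsample == 1.0 or i < 4:
    #           return False
    #       return bool(np.all(est.oob_improvement_[i - 4:i + 1] <= 0))
    #
    #   clf = GradientBoostingClassifier(subsample=0.5, n_estimators=500)
    #   clf.fit(X_train, y_train, monitor=early_stopping_monitor)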
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in in each estimator.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
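    # A minimal sketch (hypothetical variable names): the leaf indices returned
    # by ``apply`` are often flattened and one-hot encoded to serve as learned
    # features for a downstream linear model; the encoder choice is up to the
    # caller and is not part of this module.
    #
    #   leaves = est.apply(X)                    # (n_samples, n_estimators, K)
    #   leaves_2d = leaves.reshape(len(X), -1)   # one column per (tree, class)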
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
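    Examples
    --------
    A minimal, illustrative usage sketch (``make_hastie_10_2`` is the
    synthetic dataset helper from ``sklearn.datasets``; the hyper-parameter
    values below are examples, not recommendations):
    >>> from sklearn.datasets import make_hastie_10_2
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> X, y = make_hastie_10_2(random_state=0)
    >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
    ...                                  max_depth=1, random_state=0)
    >>> clf = clf.fit(X[:2000], y[:2000])
    >>> clf.score(X[2000:], y[2000:])  # doctest: +SKIP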
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
    init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
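    Examples
    --------
    A minimal, illustrative usage sketch (``make_friedman1`` is the synthetic
    regression helper from ``sklearn.datasets``; the parameter values are only
    an example):
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X, y = make_friedman1(n_samples=1200, random_state=0, noise=1.0)
    >>> est = GradientBoostingRegressor(n_estimators=100, learning_rate=0.1,
    ...                                 max_depth=1, random_state=0, loss='ls')
    >>> est = est.fit(X[:200], y[:200])
    >>> est.predict(X[200:202])  # doctest: +SKIP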
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
        For each datapoint x in X and for each tree in the ensemble,
        return the index of the leaf x ends up in.
"""
leaves = super(GradientBoostingRegressor, self).apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
| bsd-3-clause |
ndingwall/scikit-learn | examples/kernel_approximation/plot_scalable_poly_kernels.py | 15 | 7266 | """
=======================================================
Scalable learning with polynomial kernel approximation
=======================================================
This example illustrates the use of :class:`PolynomialCountSketch` to
efficiently generate polynomial kernel feature-space approximations.
This is used to train linear classifiers that approximate the accuracy
of kernelized ones.
.. currentmodule:: sklearn.kernel_approximation
We use the Covtype dataset [2], trying to reproduce the experiments in the
original Tensor Sketch paper [1], i.e. with the algorithm implemented by
:class:`PolynomialCountSketch`.
First, we compute the accuracy of a linear classifier on the original
features. Then, we train linear classifiers on different numbers of
features (`n_components`) generated by :class:`PolynomialCountSketch`,
approximating the accuracy of a kernelized classifier in a scalable manner.
"""
print(__doc__)
# Author: Daniel Lopez-Sanchez <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_covtype
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, Normalizer
from sklearn.svm import LinearSVC
from sklearn.kernel_approximation import PolynomialCountSketch
from sklearn.pipeline import Pipeline, make_pipeline
import time
# %%
# Load the Covtype dataset, which contains 581,012 samples
# with 54 features each, distributed among 6 classes. The goal of this dataset
# is to predict forest cover type from cartographic variables only
# (no remotely sensed data). After loading, we transform it into a binary
# classification problem to match the version of the dataset in the
# LIBSVM webpage [2], which was the one used in [1].
X, y = fetch_covtype(return_X_y=True)
y[y != 2] = 0
y[y == 2] = 1 # We will try to separate class 2 from the other 6 classes.
# %%
# Here we select 5,000 samples for training and 10,000 for testing.
# To actually reproduce the results in the original Tensor Sketch paper,
# select 100,000 for training.
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=5_000,
test_size=10_000,
random_state=42)
# %%
# Now scale features to the range [0, 1] to match the format of the dataset in
# the LIBSVM webpage, and then normalize to unit length as done in the
# original Tensor Sketch paper [1].
mm = make_pipeline(MinMaxScaler(), Normalizer())
X_train = mm.fit_transform(X_train)
X_test = mm.transform(X_test)
# %%
# As a baseline, train a linear SVM on the original features and print the
# accuracy. We also measure and store accuracies and training times to
# plot them later.
results = {}
lsvm = LinearSVC()
start = time.time()
lsvm.fit(X_train, y_train)
lsvm_time = time.time() - start
lsvm_score = 100 * lsvm.score(X_test, y_test)
results["LSVM"] = {"time": lsvm_time, "score": lsvm_score}
print(f"Linear SVM score on raw features: {lsvm_score:.2f}%")
# %%
# Then we train linear SVMs on the features generated by
# :class:`PolynomialCountSketch` with different values for `n_components`,
# showing that these kernel feature approximations improve the accuracy
# of linear classification. In typical application scenarios, `n_components`
# should be larger than the number of features in the input representation
# in order to achieve an improvement with respect to linear classification.
# As a rule of thumb, the optimum of evaluation score / run time cost is
# typically achieved at around `n_components` = 10 * `n_features`, though this
# might depend on the specific dataset being handled. Note that, since the
# original samples have 54 features, the explicit feature map of the
# polynomial kernel of degree four would have approximately 8.5 million
# features (precisely, 54^4). Thanks to :class:`PolynomialCountSketch`, we can
# condense most of the discriminative information of that feature space into a
# much more compact representation. We repeat the experiment 5 times to
# compensate for the stochastic nature of :class:`PolynomialCountSketch`.
n_runs = 3
for n_components in [250, 500, 1000, 2000]:
ps_lsvm_time = 0
ps_lsvm_score = 0
for _ in range(n_runs):
pipeline = Pipeline(steps=[("kernel_approximator",
PolynomialCountSketch(
n_components=n_components,
degree=4)),
("linear_classifier", LinearSVC())])
start = time.time()
pipeline.fit(X_train, y_train)
ps_lsvm_time += time.time() - start
ps_lsvm_score += 100 * pipeline.score(X_test, y_test)
ps_lsvm_time /= n_runs
ps_lsvm_score /= n_runs
results[f"LSVM + PS({n_components})"] = {
"time": ps_lsvm_time, "score": ps_lsvm_score
}
print(f"Linear SVM score on {n_components} PolynomialCountSketch " +
f"features: {ps_lsvm_score:.2f}%")
# %%
# Train a kernelized SVM to see how well :class:`PolynomialCountSketch`
# is approximating the performance of the kernel. This, of course, may take
# some time, as the SVC class has relatively poor scalability. This is the
# reason why kernel approximators are so useful:
from sklearn.svm import SVC
ksvm = SVC(C=500., kernel="poly", degree=4, coef0=0, gamma=1.)
start = time.time()
ksvm.fit(X_train, y_train)
ksvm_time = time.time() - start
ksvm_score = 100 * ksvm.score(X_test, y_test)
results["KSVM"] = {"time": ksvm_time, "score": ksvm_score}
print(f"Kernel-SVM score on raw featrues: {ksvm_score:.2f}%")
# %%
# Finally, plot the results of the different methods against their training
# times. As we can see, the kernelized SVM achieves a higher accuracy,
# but its training time is much larger and, most importantly, will grow
# much faster if the number of training samples increases.
N_COMPONENTS = [250, 500, 1000, 2000]
fig, ax = plt.subplots(figsize=(7, 7))
ax.scatter([results["LSVM"]["time"], ], [results["LSVM"]["score"], ],
label="Linear SVM", c="green", marker="^")
ax.scatter([results["LSVM + PS(250)"]["time"], ],
[results["LSVM + PS(250)"]["score"], ],
label="Linear SVM + PolynomialCountSketch", c="blue")
for n_components in N_COMPONENTS:
ax.scatter([results[f"LSVM + PS({n_components})"]["time"], ],
[results[f"LSVM + PS({n_components})"]["score"], ],
c="blue")
ax.annotate(f"n_comp.={n_components}",
(results[f"LSVM + PS({n_components})"]["time"],
results[f"LSVM + PS({n_components})"]["score"]),
xytext=(-30, 10), textcoords="offset pixels")
ax.scatter([results["KSVM"]["time"], ], [results["KSVM"]["score"], ],
label="Kernel SVM", c="red", marker="x")
ax.set_xlabel("Training time (s)")
ax.set_ylabel("Accurary (%)")
ax.legend()
plt.show()
# %%
# References
# ==========
#
# [1] Pham, Ninh and Rasmus Pagh. "Fast and scalable polynomial kernels via
# explicit feature maps." KDD '13 (2013).
# https://doi.org/10.1145/2487575.2487591
#
# [2] LIBSVM binary datasets repository
# https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary.html
| bsd-3-clause |
rseubert/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 30 | 1812 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
"""
Test BayesianRidge on diabetes
"""
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
"""
Test BayesianRidge on toy
"""
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
"""
Test BayesianRegression ARD classifier
"""
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
doublsky/MLProfile | prof_blas.py | 1 | 9103 | """
Profile all bench_list
"""
import subprocess as sp
import pandas as pd
import argparse
from util import *
import socket
rpt_cmd = "opreport -l -n".split()
# read a list of kernels of interest (loaded below from --klist)
def trim_func_param(infile, outfile):
with open(infile, "r") as inf, open(outfile, "w") as outf:
for line in inf:
remainder = line.split("(")[0]
outf.write(remainder + "\n")
def process_rpt(rpt, results_df, idx):
# global results_df, idx
    # read results into a dataframe
rpt_df = pd.read_table(rpt, delim_whitespace=True, header=None, index_col=False,
names=["samples", "percent", "image_name", "symbol_name"])
# select kernels / exclude kernels
if args.kexclude:
for kernel in kernel_list:
rpt_df = rpt_df[~(rpt_df["symbol_name"].str.contains(kernel))]
# copy rest kernels
for _, row in rpt_df.iterrows():
if args.kexclude:
results_df.set_value(idx, row["symbol_name"], row["percent"])
else:
if row["symbol_name"] in kernel_list:
results_df.set_value(idx, row["symbol_name"], row["percent"])
# move to next record
return idx + 1
def test_bench(args):
# iterate through all benchmarks
with open(args.blist, "r") as bench_list:
for bench in bench_list:
if bench.startswith("#"): # allow commenting in benchmark list
continue
test_cmd = ["timeout", "-k", "3", "3", "python", benchfile]
config_file = get_config_file(benchfile, args.tool)
with open(config_file, "r") as config_list, open(args.output, "w") as outfile:
for config in config_list:
maybe_create_dataset(config)
sp.call(test_cmd + config.split(), stdout=outfile, stderr=outfile)
def perf_bench(args):
# iterate through all benchmarks
with open(args.blist, "r") as bench_list:
for bench in bench_list:
if bench.startswith("#"): # allow commenting in benchmark list
continue
# init
benchfile = "benchmark/" + bench.rstrip()
perf_cmd = ["operf", "--event=CPU_CLK_UNHALTED:3000000", "python", benchfile]
results_df = pd.DataFrame()
idx = 0
with open(get_config_file(benchfile, "perf"), "r") as config_list:
for config in config_list:
maybe_create_dataset(config)
try:
sp.check_call(perf_cmd + config.split())
sp.check_call(rpt_cmd + ["-o", "/tmp/blasrpt.tmp"])
trim_func_param("/tmp/blasrpt.tmp", "/tmp/blasrpt_trimmed.tmp")
idx = process_rpt("/tmp/blasrpt_trimmed.tmp", results_df, idx)
finally:
# post processing (generate signature)
#for index, row in results_df.iterrows():
# sig = get_series_signature(row)
# results_df.set_value(index, "signature", sig)
# export to .csv
results_file = benchfile.replace("bench_", "perf_")
results_file = results_file.replace(".py", ".csv")
results_df.to_csv(results_file, index=False)
def time_bench(args):
# iterate through all benchmarks
with open(args.blist, "r") as bench_list:
for bench in bench_list:
if bench.startswith("#"): # allow commenting in benchmark list
continue
# init
benchfile = "benchmark/" + bench.rstrip()
time_output = benchfile.replace(".py", ".time")
cmd = ["/usr/bin/time", "-a", "-o", time_output, "python"] + [benchfile]
# foreach configuration
with open(get_config_file(benchfile, "time"), "r") as config_file:
for config in config_file:
maybe_create_dataset(config)
sp.check_call(cmd + config.split())
def trace2csv(csvfile, count, comm_mat):
total = 0
for key, value in comm_mat.iteritems():
total += value
with open(csvfile, "a") as resutls:
for key, value in comm_mat.iteritems():
resutls.write("{},{},{},{}\n".format(count, key[0], key[1], float(value)/total))
def accumulate_comm_mat(partial_comm_mat, comm_mat):
total = 0
for key, value in partial_comm_mat.iteritems():
total += value
for key, value in partial_comm_mat.iteritems():
if key in comm_mat:
comm_mat[key] += float(partial_comm_mat[key]) / total
else:
comm_mat[key] = float(partial_comm_mat[key]) / total
def pin_bench(args):
# force numpy to run in single thread
os.environ["OMP_NUM_THREADS"] = "1"
# get pin root
pin_home = os.environ["PIN_ROOT"]
pin_cmd = [pin_home+"/pin", "-t", "pintools/obj-intel64/procatrace.so"]
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# iterate through all benchmarks
with open(args.blist, "r") as bench_list:
for bench in bench_list:
if bench.startswith("#"): # allow commenting in benchmark list
continue
# init
bench = bench.rstrip()
benchfile = "benchmark/" + bench
config_file = get_config_file(benchfile, "pin")
count = 0
outfile = benchfile.replace(".py", "_pin.csv")
if os.path.exists(outfile):
os.remove(outfile)
with open(outfile, "w") as f:
f.write("use case,producer,consumer,comm weight\n")
with open(config_file, 'r') as config_list:
for configs in config_list:
# init
tracefile = bench.replace(".py", "_config"+str(count)+".trace")
tracefile = os.path.join(args.outdir, tracefile)
# skip profile if output file exist
if not os.path.exists(tracefile):
# create dataset if not exist
maybe_create_dataset(configs)
# call pin
full_cmd = list(pin_cmd)
full_cmd += ["-output", tracefile, "--", "python", benchfile]
full_cmd += configs.split()
try:
sp.check_call(full_cmd)
except:
os.remove(tracefile)
raise
with open(tracefile, "r") as trace:
comm_mat = parse_trace(trace)
trace2csv(outfile, count, comm_mat)
# remove tracefile if it is too large
if os.path.getsize(tracefile) > 1e10:
os.remove(tracefile)
count += 1
if __name__ == "__main__":
# top level parser
parser = argparse.ArgumentParser(description="Run benchmarks, collect data")
parser.add_argument("--blist", default="bench_list.txt", help="path to benchmark list")
subparsers = parser.add_subparsers(help="available sub-command")
# parser for time
parser_time = subparsers.add_parser("time", help="time each benchmark")
parser_time.set_defaults(func=time_bench)
# parser for operf
parser_perf = subparsers.add_parser("perf", help="profile using operf")
parser_perf.add_argument("--klist", default="kernel_list.txt", help="path to kernel list")
parser_perf.add_argument("--kexclude", action="store_true", help="exclude kernels in klist")
parser_perf.add_argument("--test", action="store_true", help="Test benchmarks, do not profile.")
parser_perf.set_defaults(func=perf_bench)
# parser for pin
parser_pin = subparsers.add_parser("pin", help="run Pin, generate memory reference trace")
parser_pin.add_argument("--klist", default="kernel_list.txt", help="path to kernel list file")
parser_pin.add_argument("--outdir", default="pin_out", help="path to output directory")
parser_pin.set_defaults(func=pin_bench)
# parser for test
parser_test = subparsers.add_parser("test", help="test validity of benchmark configurations")
parser_test.add_argument("--tool", default="perf", choices=["time", "perf", "pin"], help="for which tool")
parser_test.add_argument("--output", default="test.log", help="path to test results file")
parser_test.set_defaults(func=test_bench)
    # parse command-line args
    args = parser.parse_args()
    # only the perf and pin sub-commands define --klist; guard so that the
    # time and test sub-commands do not fail on the missing attribute
    kernel_list = []
    if hasattr(args, "klist"):
        with open(args.klist, "r") as klist_file:
            kernel_list = map(lambda x: x.rstrip(), klist_file.readlines())
    args.func(args)
| mit |
0asa/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
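A minimal usage sketch (illustrative only; :class:`EmpiricalCovariance` is
the simplest of the estimators exported below):
>>> import numpy as np
>>> from sklearn.covariance import EmpiricalCovariance
>>> X = np.random.RandomState(0).randn(100, 3)
>>> cov = EmpiricalCovariance().fit(X)
>>> cov.covariance_.shape
(3, 3)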
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/indexing/test_iloc.py | 2 | 23461 | """ test positional based indexing with iloc """
import pytest
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.compat import lrange, lmap
from pandas import Series, DataFrame, date_range, concat, isna
from pandas.util import testing as tm
from pandas.tests.indexing.common import Base
class TestiLoc(Base):
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assert_raises_regex(IndexError,
'positional indexers '
'are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
pytest.raises(IndexError, lambda: df.iloc[[1, 30]])
pytest.raises(IndexError, lambda: df.iloc[[1, -30]])
pytest.raises(IndexError, lambda: df.iloc[[100]])
s = df['A']
pytest.raises(IndexError, lambda: s.iloc[[100]])
pytest.raises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assert_raises_regex(IndexError, msg):
df.iloc[30]
pytest.raises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assert_raises_regex(IndexError, msg):
s.iloc[30]
pytest.raises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
pytest.raises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
pytest.raises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
assert result == expected
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
assert isna(result)
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
assert result == 1
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize(
'data, indexes, values, expected_k', [
# test without indexer value in first level of MultiIndex
([[2, 22, 5], [2, 33, 6]], [0, -1, 1], [2, 3, 1], [7, 10]),
# test like code sample 1 in the issue
([[1, 22, 555], [1, 33, 666]], [0, -1, 1], [200, 300, 100],
[755, 1066]),
# test like code sample 2 in the issue
([[1, 3, 7], [2, 4, 8]], [0, -1, 1], [10, 10, 1000], [17, 1018]),
# test like code sample 3 in the issue
([[1, 11, 4], [2, 22, 5], [3, 33, 6]], [0, -1, 1], [4, 7, 10],
[8, 15, 13])
])
def test_iloc_setitem_int_multiindex_series(
self, data, indexes, values, expected_k):
# GH17148
df = pd.DataFrame(
data=data,
columns=['i', 'j', 'k'])
df = df.set_index(['i', 'j'])
series = df.k.copy()
for i, v in zip(indexes, values):
series.iloc[i] += v
df['k'] = expected_k
expected = df.k
tm.assert_series_equal(series, expected)
def test_iloc_setitem_list(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_iloc_setitem_pandas_object(self):
# GH 17193, affecting old numpy (1.7 and 1.8)
s_orig = Series([0, 1, 2, 3])
expected = Series([0, -1, -2, 3])
s = s_orig.copy()
s.iloc[Series([1, 2])] = [-1, -2]
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.iloc[pd.Index([1, 2])] = [-1, -2]
tm.assert_series_equal(s, expected)
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
assert result == exp
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
        # negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
        # duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
assert result == exp
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
assert result == exp
# out-of-bounds exception
pytest.raises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
pytest.raises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
assert result == 1
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
assert result == 1
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
pytest.raises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
pytest.raises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_iloc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.iloc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_identity_slice_returns_new_object(self):
# GH13873
original_df = DataFrame({'a': [1, 2, 3]})
sliced_df = original_df.iloc[:]
assert sliced_df is not original_df
# should be a shallow copy
original_df['a'] = [4, 4, 4]
assert (sliced_df['a'] == 4).all()
original_series = Series([1, 2, 3, 4, 5, 6])
sliced_series = original_series.iloc[:]
assert sliced_series is not original_series
# should also be a shallow copy
original_series[:3] = [7, 8, 9]
assert all(sliced_series[:3] == [7, 8, 9])
| apache-2.0 |
alex-pirozhenko/sklearn-pmml | sklearn_pmml/convert/test/test_decisionTreeClassifierConverter.py | 2 | 5880 | import numpy as np
from sklearn_pmml.convert.test.jpmml_test import JPMMLClassificationTest, JPMMLRegressionTest, TARGET_NAME, TARGET
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn_pmml.convert import TransformationContext, pmml_row, ModelMode, Schema
from sklearn_pmml.convert.features import *
from sklearn_pmml.convert.tree import DecisionTreeConverter
from sklearn_pmml import pmml
from unittest import TestCase
class TestDecisionTreeClassifierConverter(TestCase):
def setUp(self):
np.random.seed(1)
self.est = DecisionTreeClassifier(max_depth=2)
self.est.fit([
[0, 0],
[0, 1],
[1, 0],
[1, 1],
], [0, 1, 1, 1])
self.ctx = TransformationContext({
Schema.INPUT: [
IntegerNumericFeature('x1'),
StringCategoricalFeature('x2', ['zero', 'one'])
],
Schema.MODEL: [
IntegerNumericFeature('x1'),
StringCategoricalFeature('x2', ['zero', 'one'])
],
Schema.DERIVED: [],
Schema.OUTPUT: [
IntegerNumericFeature('output')
]
})
self.converter = DecisionTreeConverter(
estimator=self.est,
context=self.ctx,
mode=ModelMode.CLASSIFICATION
)
def test_transform(self):
p = self.converter.pmml()
tm = p.TreeModel[0]
assert tm.MiningSchema is not None, 'Missing mining schema'
assert len(tm.MiningSchema.MiningField) == 2, 'Wrong number of mining fields'
assert tm.Node is not None, 'Missing root node'
assert tm.Node.recordCount == 4
assert tm.Node.True_ is not None, 'Root condition should always be True'
def test_transform_with_derived_field(self):
self.est = DecisionTreeClassifier(max_depth=2)
self.est.fit([
[0, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 1, 1],
], [0, 1, 1, 1])
mapping = pmml.MapValues(dataType="double", outputColumn="output")
mapping.append(pmml.FieldColumnPair(column="x1", field="x1"))
mapping.append(pmml.FieldColumnPair(column="x2", field="x2"))
it = pmml.InlineTable()
mapping_df = pd.DataFrame([
dict(x1=0, x2='zero', output=0),
dict(x1=0, x2='one', output=0),
dict(x1=1, x2='zero', output=0),
dict(x1=1, x2='one', output=1),
])
for idx, line in mapping_df.iterrows():
it.append(pmml_row(**dict(line)))
mapping.append(it)
mapping_df.set_index(keys=['x1', 'x2'])
mapping_f = np.vectorize(lambda x1, x2: mapping_df.ix[x1, x2].output.values[0])
self.ctx = TransformationContext({
Schema.INPUT: [
IntegerNumericFeature('x1'),
StringCategoricalFeature('x2', ['zero', 'one'])
],
Schema.DERIVED: [
DerivedFeature(
feature=RealNumericFeature(name='x3'),
transformation=mapping,
function=mapping_f
)
],
Schema.MODEL: [
IntegerNumericFeature('x1'),
StringCategoricalFeature('x2', ['zero', 'one']),
RealNumericFeature(name='x3')
],
Schema.OUTPUT: [
IntegerCategoricalFeature('output', ['neg', 'pos'])
]
})
self.converter = DecisionTreeConverter(
estimator=self.est,
context=self.ctx,
mode=ModelMode.CLASSIFICATION
)
self.converter.pmml().toxml()
class TestDecisionTreeRegressorConverter(TestCase):
def setUp(self):
np.random.seed(1)
self.est = DecisionTreeRegressor(max_depth=2)
self.est.fit([
[0, 0],
[0, 1],
[1, 0],
[1, 1],
], [0, 1, 1, 1])
self.ctx = TransformationContext({
Schema.INPUT: [
IntegerNumericFeature('x1'),
StringCategoricalFeature('x2', ['zero', 'one'])
],
Schema.MODEL: [
IntegerNumericFeature('x1'),
StringCategoricalFeature('x2', ['zero', 'one'])
],
Schema.DERIVED: [],
Schema.OUTPUT: [
IntegerNumericFeature('output')
]
})
self.converter = DecisionTreeConverter(
estimator=self.est,
context=self.ctx,
mode=ModelMode.REGRESSION
)
def test_transform(self):
p = self.converter.pmml()
tm = p.TreeModel[0]
assert tm.MiningSchema is not None, 'Missing mining schema'
assert len(tm.MiningSchema.MiningField) == 2, 'Wrong number of mining fields'
assert tm.Node is not None, 'Missing root node'
assert tm.Node.recordCount == 4
assert tm.Node.True_ is not None, 'Root condition should always be True'
class TestDecisionTreeClassificationJPMMLParity(TestCase, JPMMLClassificationTest):
def setUp(self):
self.model = DecisionTreeClassifier(max_depth=2)
self.init_data()
self.converter = DecisionTreeConverter(
estimator=self.model,
context=self.ctx,
mode=ModelMode.CLASSIFICATION
)
@property
def output(self):
return IntegerCategoricalFeature(name=TARGET_NAME, value_list=TARGET)
class TestDecisionTreeRegressionJPMMLParity(TestCase, JPMMLRegressionTest):
def setUp(self):
self.model = DecisionTreeRegressor()
self.init_data()
self.converter = DecisionTreeConverter(
estimator=self.model,
context=self.ctx,
mode=ModelMode.REGRESSION
)
| mit |
mblondel/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
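    # One possible solution sketch for the TASK items above (illustrative
    # only -- the vectorizer and classifier settings are assumptions, not
    # the reference answer for this exercise):
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    # cross-validated score of every parameter combination explored
    for params, mean_score, scores in grid_search.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r"
              % (mean_score, scores.std() * 2, params))
    y_predicted = grid_search.predict(docs_test)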
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
Dapid/numpy | doc/example.py | 81 | 3581 | """This is the docstring for the example.py module. Module names should
be short and all-lowercase. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi') :
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
    long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
    .. math:: X(e^{j\omega }) = \sum_{n=-\infty}^{\infty} x(n)e^{ - j\omega n}
    And even use a greek symbol like :math:`\omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
    >>> a=[1,2,3]
    >>> print([x + 3 for x in a])
    [4, 5, 6]
    >>> print("a\n\nb")
    a
    <BLANKLINE>
    b
"""
pass
| bsd-3-clause |
johnmgregoire/PythonCompositionPlots | myternarydemo_hsvcompdiff.py | 1 | 1031 | from myternaryutility import TernaryPlot
import matplotlib.cm as cm
import numpy
import pylab, copy
from colorsys import rgb_to_hsv
from colorsys import hsv_to_rgb
pylab.figure(figsize=(6, 3))
ax=pylab.gca()
#stp = TernaryPlot(ax, ellabels=['Au', 'Cu', 'Si'])
stp = TernaryPlot(ax, ellabels=['A', 'B', 'C'])
stp.grid(nintervals=10, printticklabels=[4])
stp.label(fontsize=12)
comps=numpy.random.rand(50, 3)
comps/=comps.sum(axis=1)[:, numpy.newaxis]
#compdist=(numpy.random.rand(len(comps), 3)-0.5)/5
comps2=copy.copy(comps)
comps2[:, 2]+=.5
comps2/=comps2.sum(axis=1)[:, numpy.newaxis]
#compsdiff=comps2-comps
#
#terncoord=numpy.float64(comps)
#terncoord2=numpy.float64(comps2)
#
#
#
#sat = ((compsdiff**2).sum(axis=1)/2.)**.5
#
#huelist=[0. if cd.sum()==0. else rgb_to_hsv(*(cd/cd.sum()))[0] for cd in numpy.abs(compsdiff)]
#
#sat_norm=sat/max(sat)
#
#rgblist=[hsv_to_rgb(h, s, 1) for h, s in zip(huelist, sat_norm)]
#rgb_arr=stp.complex_to_rgb(ang, sat_norm)
stp.hsdiffplot(comps, comps2)
#
#
pylab.show()
print('done')
| bsd-3-clause |
appapantula/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
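# Hedged sanity check (not part of the original example): with a top-hat
# kernel the estimated density at a point is simply the fraction of samples
# within one bandwidth of it, scaled by 1/(2*bandwidth), so this naive
# count-based estimate should closely match score_samples above (up to
# samples landing exactly on the bandwidth boundary).
naive_tophat = np.array([np.sum(np.abs(X[:, 0] - x0) < kde.bandwidth)
                         for x0 in X_plot[:, 0]])
naive_tophat = naive_tophat / (X.shape[0] * 2.0 * kde.bandwidth)
# np.allclose(naive_tophat, np.exp(log_dens)) is expected to hold here.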
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
andyfaff/scipy | scipy/stats/_stats_mstats_common.py | 12 | 16438 | import numpy as np
import scipy.stats.stats
from . import distributions
from .._lib._bunch import _make_tuple_bunch
__all__ = ['_find_repeats', 'linregress', 'theilslopes', 'siegelslopes']
# This is not a namedtuple for backwards compatibility. See PR #12983
LinregressResult = _make_tuple_bunch('LinregressResult',
['slope', 'intercept', 'rvalue',
'pvalue', 'stderr'],
extra_field_names=['intercept_stderr'])
def linregress(x, y=None, alternative='two-sided'):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length. If
only `x` is given (and ``y=None``), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension. In
the case where ``y=None`` and `x` is a 2x2 array, ``linregress(x)`` is
equivalent to ``linregress(x[0], x[1])``.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the slope of the regression line is nonzero
* 'less': the slope of the regression line is less than zero
* 'greater': the slope of the regression line is greater than zero
.. versionadded:: 1.7.0
Returns
-------
result : ``LinregressResult`` instance
The return value is an object with the following attributes:
slope : float
Slope of the regression line.
intercept : float
Intercept of the regression line.
rvalue : float
Correlation coefficient.
pvalue : float
The p-value for a hypothesis test whose null hypothesis is
that the slope is zero, using Wald Test with t-distribution of
the test statistic. See `alternative` above for alternative
hypotheses.
stderr : float
Standard error of the estimated slope (gradient), under the
assumption of residual normality.
intercept_stderr : float
Standard error of the estimated intercept, under the assumption
of residual normality.
See Also
--------
scipy.optimize.curve_fit :
Use non-linear least squares to fit a function to data.
scipy.optimize.leastsq :
Minimize the sum of squares of a set of equations.
Notes
-----
Missing values are considered pair-wise: if a value is missing in `x`,
the corresponding value in `y` is masked.
For compatibility with older versions of SciPy, the return value acts
like a ``namedtuple`` of length 5, with fields ``slope``, ``intercept``,
``rvalue``, ``pvalue`` and ``stderr``, so one can continue to write::
slope, intercept, r, p, se = linregress(x, y)
With that style, however, the standard error of the intercept is not
available. To have access to all the computed values, including the
standard error of the intercept, use the return value as an object
with attributes, e.g.::
result = linregress(x, y)
print(result.intercept, result.intercept_stderr)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> rng = np.random.default_rng()
Generate some data:
>>> x = rng.random(10)
>>> y = 1.6*x + rng.random(10)
Perform the linear regression:
>>> res = stats.linregress(x, y)
Coefficient of determination (R-squared):
>>> print(f"R-squared: {res.rvalue**2:.6f}")
R-squared: 0.717533
Plot the data along with the fitted line:
>>> plt.plot(x, y, 'o', label='original data')
>>> plt.plot(x, res.intercept + res.slope*x, 'r', label='fitted line')
>>> plt.legend()
>>> plt.show()
Calculate 95% confidence interval on slope and intercept:
>>> # Two-sided inverse Students t-distribution
>>> # p - probability, df - degrees of freedom
>>> from scipy.stats import t
>>> tinv = lambda p, df: abs(t.ppf(p/2, df))
>>> ts = tinv(0.05, len(x)-2)
>>> print(f"slope (95%): {res.slope:.6f} +/- {ts*res.stderr:.6f}")
slope (95%): 1.453392 +/- 0.743465
>>> print(f"intercept (95%): {res.intercept:.6f}"
... f" +/- {ts*res.intercept_stderr:.6f}")
intercept (95%): 0.616950 +/- 0.544475
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
raise ValueError("If only `x` is given as input, it has to "
"be of shape (2, N) or (N, 2); provided shape "
f"was {x.shape}.")
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# Average sums of square differences from the mean
# ssxm = mean( (x-mean(x))^2 )
# ssxym = mean( (x-mean(x)) * (y-mean(y)) )
ssxm, ssxym, _, ssym = np.cov(x, y, bias=1).flat
# R-value
# r = ssxym / sqrt( ssxm * ssym )
if ssxm == 0.0 or ssym == 0.0:
# If the denominator was going to be 0
r = 0.0
else:
r = ssxym / np.sqrt(ssxm * ssym)
# Test for numerical error propagation (make sure -1 < r < 1)
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
slope = ssxym / ssxm
intercept = ymean - slope*xmean
if n == 2:
# handle case when only two points are passed in
if y[0] == y[1]:
prob = 1.0
else:
prob = 0.0
slope_stderr = 0.0
intercept_stderr = 0.0
else:
df = n - 2 # Number of degrees of freedom
# n-2 degrees of freedom because 2 has been used up
# to estimate the mean and standard deviation
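        # TINY keeps the denominator of the t-statistic
        #     t = r * sqrt(df / (1 - r**2))
        # strictly positive when the data are perfectly collinear
        # (r = +/-1), so the expression below cannot divide by zero.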
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
t, prob = scipy.stats.stats._ttest_finish(df, t, alternative)
slope_stderr = np.sqrt((1 - r**2) * ssym / ssxm / df)
# Also calculate the standard error of the intercept
# The following relationship is used:
# ssxm = mean( (x-mean(x))^2 )
# = ssx - sx*sx
# = mean( x^2 ) - mean(x)^2
intercept_stderr = slope_stderr * np.sqrt(ssxm + xmean**2)
return LinregressResult(slope=slope, intercept=intercept, rvalue=r,
pvalue=prob, stderr=slope_stderr,
intercept_stderr=intercept_stderr)
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
See also
--------
siegelslopes : a similar technique using repeated medians
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on
Kendall's tau", J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" %
(len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
sum(k * (k-1) * (2*k + 5) for k in nxreps) -
sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
def siegelslopes(y, x=None, method="hierarchical"):
r"""
Computes the Siegel estimator for a set of points (x, y).
`siegelslopes` implements a method for robust linear regression
using repeated medians (see [1]_) to fit a line to the points (x, y).
The method is robust to outliers with an asymptotic breakdown point
of 50%.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
method : {'hierarchical', 'separate'}
If 'hierarchical', estimate the intercept using the estimated
slope ``medslope`` (default option).
If 'separate', estimate the intercept independent of the estimated
slope. See Notes for details.
Returns
-------
medslope : float
Estimate of the slope of the regression line.
medintercept : float
Estimate of the intercept of the regression line.
See also
--------
theilslopes : a similar technique without repeated medians
Notes
-----
With ``n = len(y)``, compute ``m_j`` as the median of
the slopes from the point ``(x[j], y[j])`` to all other `n-1` points.
``medslope`` is then the median of all slopes ``m_j``.
Two ways are given to estimate the intercept in [1]_ which can be chosen
via the parameter ``method``.
The hierarchical approach uses the estimated slope ``medslope``
and computes ``medintercept`` as the median of ``y - medslope*x``.
The other approach estimates the intercept separately as follows: for
each point ``(x[j], y[j])``, compute the intercepts of all the `n-1`
lines through the remaining points and take the median ``i_j``.
``medintercept`` is the median of the ``i_j``.
The implementation computes `n` times the median of a vector of size `n`
which can be slow for large vectors. There are more efficient algorithms
(see [2]_) which are not implemented here.
References
----------
.. [1] A. Siegel, "Robust Regression Using Repeated Medians",
Biometrika, Vol. 69, pp. 242-244, 1982.
.. [2] A. Stein and M. Werman, "Finding the repeated median regression
line", Proceedings of the Third Annual ACM-SIAM Symposium on
Discrete Algorithms, pp. 409-413, 1992.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope and intercept. For comparison, also compute the
least-squares fit with `linregress`:
>>> res = stats.siegelslopes(y, x)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Siegel regression line is shown in red. The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
if method not in ['hierarchical', 'separate']:
raise ValueError("method can only be 'hierarchical' or 'separate'")
y = np.asarray(y).ravel()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.asarray(x, dtype=float).ravel()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" %
(len(y), len(x)))
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes, intercepts = [], []
for j in range(len(x)):
id_nonzero = deltax[j, :] != 0
slopes_j = deltay[j, id_nonzero] / deltax[j, id_nonzero]
medslope_j = np.median(slopes_j)
slopes.append(medslope_j)
if method == 'separate':
z = y*x[j] - y[j]*x
medintercept_j = np.median(z[id_nonzero] / deltax[j, id_nonzero])
intercepts.append(medintercept_j)
medslope = np.median(np.asarray(slopes))
if method == "separate":
medinter = np.median(np.asarray(intercepts))
else:
medinter = np.median(y - medslope*x)
return medslope, medinter
| bsd-3-clause |
hkailee/FluSeq | ms2ContLearning.py | 1 | 12630 | #!/usr/bin/env python3.4
__author__ = 'mdc_hk'
version = '1.0'
#===========================================================================================================
import datetime, logging, multiprocessing, os, re, subprocess, sys, time
import pandas as pd
from pandas import Series
from bs4 import BeautifulSoup
#===========================================================================================================
# Functions:
# 1: Checks if in proper number of arguments are passed gives instructions on proper use.
def argsCheck(numArgs):
if len(sys.argv) < numArgs or len(sys.argv) > numArgs:
        print('To learn the contamination rate from the MS2 system control')
print('Usage:', sys.argv[0], '<FolderInput>',' <DateOfRunInYYMMDD>')
print('Examples:', sys.argv[0], 'FolderMS2_001', '151225')
exit(1) # Aborts program. (exit(1) indicates that an error occurred)
#===========================================================================================================
# Housekeeping.
argsCheck(3) # Checks if the number of arguments are correct.
# Stores file one for input checking.
inFolder = sys.argv[1]
dateOfRun = sys.argv[2]
# Setting up working and fluSeq directories...
workingFolder_tmp = '~/FluSeq/' + inFolder
os.chdir(os.path.expanduser(workingFolder_tmp))
workingFolder = os.getcwd()
fluSeqFolder = os.path.expanduser('~/FluSeq/')
# Logging events...
logging.basicConfig(filename=workingFolder + '/Log.txt', level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s')
startTime = time.time()
logging.info('Command-line invocation: ' + sys.argv[0] + ' ' + sys.argv[1])
logging.info('Runfolder path: ' + workingFolder)
# determining the number of logical CPUs available in your PC...
numberOfProcessors = multiprocessing.cpu_count()
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- The program will be optimized to use '
+ str(numberOfProcessors) + ' logical CPUs available in your PC.')
logging.info('Detected logical CPUs: ' + str(numberOfProcessors))
# Finding the unique samples present in the working folder...
workingFilesR1 = [f for f in os.listdir(workingFolder) if re.match(r'[\S]+S\d+_L001_R1_001\.fastq\.gz', f)]
fastqFileNameR1 = re.compile(r'(([\S]+)_S\d+_L001_R)1_001\.fastq\.gz')
fastqFilesR1 = []
for file in workingFilesR1:
fastqFilesR1.append(fastqFileNameR1.findall(file))
# Starting the analyses...
for file in fastqFilesR1:
# bwa aln...
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- Performing bwa aln for paired-end reads '
'of sample ' + file[0][1])
proc1 = subprocess.Popen(['bwa', 'aln', '-t', str(numberOfProcessors), '-q', '15', '-f',
workingFolder + '/' + file[0][0] + '1_001.sai',
fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa',
workingFolder + '/' + file[0][0]+'1_001.fastq.gz'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc1.communicate()
logging.info('bwa aln -t ' + str(numberOfProcessors) + ' -q 15 -f ' + workingFolder + '/'
+ file[0][0] + '1_001.sai ' + fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa '
+ workingFolder + '/' + file[0][0]+'1_001.fastq.gz')
proc2 = subprocess.Popen(['bwa', 'aln', '-t', str(numberOfProcessors), '-q', '15',
'-f', workingFolder + '/' + file[0][0] + '2_001.sai',
fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa',
workingFolder + '/' + file[0][0]+'2_001.fastq.gz'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc2.communicate()
logging.info('bwa aln -t ' + str(numberOfProcessors) + ' -q 15 -f ' + workingFolder + '/'
+ file[0][0] + '2_001.sai ' + fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa '
+ workingFolder + '/' + file[0][0]+'2_001.fastq.gz')
# bwa sampe...
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- Performing bwa sampe for sample ' +
file[0][1])
proc3 = subprocess.Popen(['bwa', 'sampe', '-r' + '@RG\tID:'+file[0][1]+'\tPL:ILLUMINA\tSM:'+file[0][1],
'-f', workingFolder + '/' + file[0][1] + '.uncompressed.bam',
fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa',
workingFolder + '/' + file[0][0] + '1_001.sai',
workingFolder + '/' + file[0][0] + '2_001.sai',
workingFolder + '/' + file[0][0] + '1_001.fastq.gz',
workingFolder + '/' + file[0][0] + '2_001.fastq.gz'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc3.communicate()
logging.info('bwa sampe -r @RG\tID:'+file[0][1]+'\tPL:ILLUMINA\tSM:'+file[0][1] +
' -f '+ workingFolder + '/' + file[0][1] + '.uncompressed.bam ' +
fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa ' +
workingFolder + '/' + file[0][0] + '1_001.sai ' +
workingFolder + '/' + file[0][0] + '2_001.sai ' +
workingFolder + '/' + file[0][0] + '1_001.fastq.gz ' +
workingFolder + '/' + file[0][0] + '2_001.fastq.gz')
# Performing bam sorting using samtools...
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- Performing bam sorting for sample '
+ file[0][1] + ' using samtools sort module')
proc4 = subprocess.Popen(['samtools', 'sort', workingFolder + '/' + file[0][1] +
'.uncompressed.bam', workingFolder + '/' + file[0][1]],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc4.communicate()
logging.info('samtools sort ' + workingFolder + '/' + file[0][1] +
'.uncompressed.bam ' + workingFolder + '/' + file[0][1])
# Performing bam indexing using samtools...
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- Performing samtools indexing for sample '
+ file[0][1] + ' using samtools index module')
proc5 = subprocess.Popen(['samtools', 'index', workingFolder + '/' + file[0][1]+'.bam'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc5.communicate()
logging.info('samtools index ' + workingFolder + '/' + file[0][1]+'.bam')
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- Analysing Depth of Coverage for each '
          'gene segment of sample ' + file[0][1] + ' using GATK DepthOfCoverage')
proc6 = subprocess.Popen(['java', '-Xmx4g', '-jar', '/home/hklee/Software/GenomeAnalysisTK.jar', '-T',
'DepthOfCoverage', '-R', fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa',
'-o', workingFolder + '/' + 'MS2SysCtrl_base',
'-I', workingFolder + '/' + file[0][1]+'.bam', '-omitIntervals',
'-omitSampleSummary'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc6.communicate()
logging.info('java -Xmx4g -jar /home/hklee/Software/GenomeAnalysisTK.jar -T DepthOfCoverage -R'
+ fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa -o ' + workingFolder + '/'
+ 'MS2SysCtrl_base -I ' + workingFolder + '/' + file[0][1]
+ '.bam -omitIntervals -omitSampleSummary')
# Housekeeping...
os.unlink(workingFolder + '/' + file[0][0] + '1_001.sai')
os.unlink(workingFolder + '/' + file[0][0] + '2_001.sai')
os.unlink(workingFolder + '/' + file[0][1] + '.uncompressed.bam')
os.unlink(workingFolder + '/' + file[0][1]+'.bam')
os.unlink(workingFolder + '/' + file[0][1]+'.bam.bai')
dataFromTable = pd.read_table(workingFolder + '/MS2SysCtrl_base', sep='\t')
columns_tojoin = dataFromTable[('Locus')].str.split(":").apply(Series, 1)
columns_tojoin.columns = ['CHROM', 'POS']
dataFromTable = pd.merge(dataFromTable, columns_tojoin, left_index=True, right_index=True)
dataFromTable = dataFromTable.set_index(['CHROM'], drop=False)
try:
bsObj = BeautifulSoup(open(workingFolder + '/ResequencingRunStatistics.xml'), 'lxml-xml')
except IOError:
errorFile = open('Error.txt', 'a')
errorFile.write('Please make sure the run-specific ResequencingRunStatistics.xml is placed in the run folder'
+ '\n')
errorFile.close()
exit(1)
xmlOfSamples = bsObj.findAll('SummarizedSampleStatistics')
if not xmlOfSamples:
xmlOfSamples = bsObj.findAll('SummarizedSampleStatisics')
listOfSamples = []
for name in xmlOfSamples:
listOfSamples.append(name.SampleName.get_text())
listOfPFReads = []
for name in xmlOfSamples:
listOfPFReads.append(round(int(name.NumberOfClustersPF.get_text())/1000000, 2))
if len(listOfSamples) == len(listOfPFReads):
dictOfSamplesAndPFreads = dict(zip(listOfSamples, listOfPFReads))
geneList = []
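# geneAnalysis pulls the per-position depth rows of one gene segment out of
# dataFromTable, annotates them with the run date, the PF-read count of the
# MS2 control (in millions) and the segment name, then keeps one row every
# 150 bp along the segment and appends those sampled rows to geneList for
# the contamination database.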
def geneAnalysis(Chrom, Gene):
GeneData = dataFromTable.ix[Chrom]
GeneData.is_copy = False
GeneData['PFreads_inMillion'] = dictOfSamplesAndPFreads['MS2SysCtrl']
GeneData['DateOfRun'] = dateOfRun
GeneData['GeneSegment'] = Gene
GeneData = GeneData.set_index(['POS'], drop=False)
geneSeq = list(dictOfGeneSegment[Chrom])
for k in range(0, len(geneSeq), 150):
GeneData_a = GeneData.ix[[k],['DateOfRun', 'CHROM', 'GeneSegment', 'POS', 'Total_Depth', 'PFreads_inMillion']]
geneList.append(GeneData_a)
return
with open(fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa' ,'r') as refFile:
content1 = refFile.read().splitlines()
geneName = []
for i in range(len(content1)):
    # FASTA header lines (">Segment...") sit on the even indices of the
    # flattened file, so the loop index itself is enough here; the original
    # content1.index() lookup was fragile for duplicate lines and costly
    if i % 2 == 0:
geneName.append(content1[i][1:])
geneCont = []
for i in range(len(content1)):
    if i % 2 != 0:
geneCont.append(content1[i])
if len(geneName) == len(geneCont):
dictOfGeneSegment = dict(zip(geneName, geneCont))
with open(fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa','r') as refFile:
sequences = refFile.read()
segmentRegex = re.compile(r'((Segment\d)_[\S]*)')
segment = segmentRegex.findall(sequences)
for seg in segment:
try:
geneAnalysis(seg[0], seg[1])
except:
errorFile = open('Error.txt', 'a')
errorFile.write(seg[1] + ' of MS2 System Control was not analysed. Suggested solution: Something is wrong '
'with the formatting of the AllGenes.xls file' + '\n')
errorFile.close()
try:
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- Updating existing ms2SysCtrl.db')
dataBase = pd.read_csv(fluSeqFolder + 'ms2SysCtrl.db', sep=',')
dataBase_tmp = pd.concat(geneList, axis=0)
with open(fluSeqFolder + 'ms2SysCtrl.db', 'a') as f:
dataBase_tmp.to_csv(f, header=False)
except OSError:
print('Warning: The MS2 contamination database file is not found in the designated directory. '
'A new database will be created. Please note that it is important to have sufficient data in the database '
          'to provide confident contamination statistics for data analyses')
errorFile = open('Error.txt', 'a')
errorFile.write('Warning: The MS2 contamination database file is not found in the designated directory. A new '
'database will be created. Please note that it is important to have sufficient data in the '
                    'database to provide confident contamination statistics for data analyses.\n')
errorFile.close()
pd.concat(geneList, axis=0).to_csv(fluSeqFolder + 'ms2SysCtrl.db')
# Housekeeping...
try:
os.unlink(workingFolder + '/MS2SysCtrl_base.sample_cumulative_coverage_counts')
except OSError:
print(workingFolder + '/MS2SysCtrl_base.sample_cumulative_coverage_counts is not found')
try:
os.unlink(workingFolder + '/MS2SysCtrl_base.sample_cumulative_coverage_proportions')
except OSError:
print(workingFolder + '/MS2SysCtrl_base.sample_cumulative_coverage_proportions is not found')
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- MS2 System contamination learning done ')
| mit |
Jimmy-Morzaria/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 13 | 43295 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
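    # (each step applies the plain SGD update
    #      w <- w * (1 - eta * alpha) - eta * (w.x + b - y) * x
    #  and then maintains a uniform running average of the iterates,
    #  which is what the estimators fitted with average=True are
    #  expected to reproduce in the tests below)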
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_auto(self):
# partial_fit with class_weight='auto' not supported
assert_raises_regexp(ValueError,
"class_weight 'auto' is not supported for "
"partial_fit. In order to use 'auto' weights, "
"use compute_class_weight\('auto', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='auto').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
        # Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
            clf = self.factory(loss=loss, alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_auto_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using automated class_weight
clf_auto = self.factory(alpha=0.0001, n_iter=1000,
class_weight="auto", shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "auto"
assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)
        # build a very, very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with auto class_weight enabled
clf = self.factory(n_iter=1000, class_weight="auto", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="auto", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
        # ground truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
grg2rsr/line_scan_traces_extractor | interactive_traces_extration.py | 1 | 6843 | # -*- coding: utf-8 -*-
"""
written by Georg Raiser 20.10.2015
questions/bugs to [email protected]
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import scipy as sp
import sys
import os
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import tifffile
class interactive_traces_extract(object):
"""
    interactively extract traces from line scan images. The image has to be
    in the format x: place, y: line index, so each horizontal line of pixels
    represents one scan along the line and the row below it is the next scan.
"""
def __init__(self,image_path,prestim_frames=None):
self.path = image_path
## ini data
self.data = self.read_image(self.path)
self.nLines = self.data.shape[0]
self.nPlaces = self.data.shape[1]
if prestim_frames:
Fstart,Fstop = prestim_frames
bck = sp.average(self.data[Fstart:Fstop,:],axis=0)[sp.newaxis,:]
self.data = (self.data - bck) / bck
## ini UI
# Image
im_params = {'interpolation':'none',
'cmap':'jet',
'extent':[0,self.nPlaces,self.nLines,0],
'origin':'upper',
'aspect':sp.float32(self.data.shape[1]) / self.data.shape[0]}
AxesImage = plt.imshow(self.data,**im_params)
self.im_ax = AxesImage.axes
self.im_fig = AxesImage.figure
self.im_ax.set_xlabel('place [px]')
self.im_ax.set_ylabel('line number')
# coordinate calc
        self.pos = int(self.nPlaces/2) # current x position of the mouse pointer
        self.width = 11 # width of the slice in pixels (x1 - x0 equals width - 1)
        self.xs = self.calc_x(self.pos,self.width) # tuple (x0,x1): bounds along which the image is sliced
# add patch
rect_params = {'facecolor':'red',
'alpha':0.5}
self.Rect = Rectangle(self.xs,self.width,self.nLines,**rect_params)
self.im_ax.add_patch(self.Rect)
# extracted traces preview
self.traces_fig = plt.figure()
self.traces_ax = self.traces_fig.add_subplot(111)
tempTrace_params = {'linewidth':2,
'color':'red'}
self.tempTrace, = self.traces_ax.plot(sp.zeros(self.nLines),**tempTrace_params)
self.traces_ax.set_xlabel('line number')
if prestim_frames:
self.traces_ax.set_ylabel('dF/F')
else:
self.traces_ax.set_ylabel('intensity [au]')
## extracting info
self.coords = []
self.traces = []
# hooking up the interactive handles
self.im_fig.canvas.mpl_connect('button_press_event', self.mouse_clicked_event)
self.im_fig.canvas.mpl_connect('scroll_event',self.scroll_event)
self.im_fig.canvas.mpl_connect('motion_notify_event',self.mouse_moved_event)
self.im_fig.canvas.mpl_connect('close_event', self.close_event)
plt.show()
pass
### input output
def read_image(self,path):
""" dummy reader, to be extended for other file formats """
return tifffile.imread(path)
def write_output(self):
""" write ouput upon closing the image figure """
outpath = os.path.splitext(self.path)[0]+'_traces.csv'
if len(self.coords) > 0:
print 'writing to ' + outpath
coors = pd.DataFrame(sp.array(self.coords).T,index=['x0','x1'])
values = pd.DataFrame(sp.vstack(self.traces).T)
Df = pd.concat([coors,values])
Df.to_csv(outpath)
else:
print "exiting without saving anything"
def close_event(self,event):
self.write_output()
plt.close('all')
def calc_x(self,pos,width):
""" calculate x0, x1 (slice extent) based on current pos and width """
if width == 1:
x0 = pos
x1 = pos + 1
else:
x0 = pos - (width-1)/2
x1 = pos + (width-1)/2
return (x0,x1)
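    # Worked example of the arithmetic above: with the default width of 11,
    # calc_x(100, 11) returns (95, 105), i.e. the slice extends five pixels
    # on either side of the pointer position; with width 1 it degenerates to
    # the single column under the pointer.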
def scroll_event(self,event):
""" changes width of slice """
if event.button == 'up':
self.width += 2
if event.button == 'down':
self.width -= 2
self.width = sp.clip(self.width,1,self.nPlaces)
self.xs = self.calc_x(self.pos,self.width)
self.Rect.set_xy((self.xs[0] ,0))
self.Rect.set_width(self.width)
self.update()
def mouse_moved_event(self,event):
""" x position of mouse determines center of slice """
if event.inaxes == self.im_ax:
self.pos = int(event.xdata)
self.update()
def mouse_clicked_event(self,event):
""" middle button click slices """
if event.button==2:
self.coords.append(self.xs)
self.traces.append(self.slice_trace(*self.xs))
self.traces_ax.plot(self.slice_trace(*self.xs),lw=1,color='grey')
rect = Rectangle((self.xs[0],0),self.width,self.nLines,facecolor='grey',alpha=0.5)
self.im_ax.add_patch(rect)
def slice_trace(self,x0,x1):
sliced = sp.average(self.data[:,x0:x1],axis=1)
return sliced
def update(self):
""" UI update """
# calc new pos
self.xs = self.calc_x(self.pos,self.width)
# update rect
self.Rect.set_xy((self.xs[0] ,0))
# get new slice
self.tempTrace.set_ydata(self.slice_trace(*self.xs))
# update traces preview
self.traces_ax.relim()
self.traces_ax.autoscale_view(True,True,True)
self.traces_fig.canvas.draw()
# update figure
self.im_fig.canvas.draw()
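# A minimal usage sketch (file name and pre-stimulus window are placeholder
# values, not part of this script):
#
#   extractor = interactive_traces_extract("scan.tif", prestim_frames=(0, 40))
#
# Middle-clicking inside the image window slices the highlighted region;
# scrolling changes the slice width. When the image window is closed the
# collected traces are written next to the input file, e.g. "scan_traces.csv".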
if __name__ == '__main__':
### testing
# path = '/home/georg/python/line_scan_traces_extractor/test_data/ant2_L1_Sum17.lsm'
# prestim_frames = (0,40)
doc = """ A small python tool to interactively extract time traces from functional imaging line scans. The Image data shape has to be x = place and y = repetition. Usage: run this file with the following arguments <path_to_file> <start> <stop>. Start and stop are optional and mark the index bounds used for background used in dF/F calculation. If ommited, raw values are used."""
### nontesting
if len(sys.argv) == 1:
print doc
sys.exit()
if len(sys.argv) == 2:
path = sys.argv[1]
prestim_frames = None
if len(sys.argv) == 4:
path = sys.argv[1]
prestim_frames = (int(sys.argv[2]),int(sys.argv[3]))
interactive_traces_extract(path,prestim_frames=prestim_frames)
| gpl-2.0 |
geektoni/Influenza-Like-Illness-Predictor | data_analysis/compare_pageviews_pagecounts.py | 1 | 3253 | import glob
import os
import argparse
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1.2)
def generate_dataframe(country, data="new_data"):
path = "../data/wikipedia_{}/{}".format(country, data)
files = [os.path.basename(f) for f in glob.glob(path + "/*.csv", recursive=True)]
years = [f.split(".")[0] for f in files]
years.sort()
df = pd.DataFrame()
for y in years:
if int(y) >= 2015 and data != "pageviews" and data != "old_data" and data != "cyclerank_pageviews":
continue
tmp = pd.read_csv(path + "/" + y + ".csv")
df = pd.concat([df, tmp], ignore_index=True)
# Fill nan values
df.fillna(0, inplace=True)
# Sum together all the pageviews
total_pageviews = df.sum(axis=1).to_frame()
total_pageviews.rename(columns={0: "pagecounts_unorm"}, inplace=True)
total_pageviews.reset_index(inplace=True)
total_pageviews["week"] = df["Week"]
# Remove frames with zero counts
indexes = total_pageviews[total_pageviews.pagecounts_unorm == 0].index
total_pageviews.drop(indexes, inplace=True)
total_pageviews.reset_index(inplace=True)
# Normalize the data
scaler = MinMaxScaler()
total_pageviews["pagecounts"] = scaler.fit_transform(total_pageviews["pagecounts_unorm"].values.reshape(-1,1))
return total_pageviews
def get_label(country):
if country == "italy":
return "Italian"
elif country == "germany":
return "German"
else:
return "Dutch"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data", type=str, default="pageviews")
parser.add_argument("--legend", action="store_true", default=False)
args = parser.parse_args()
countries = ["italy", "germany", "netherlands"]
# Generate figure
fig = plt.figure(figsize=(8,4))
# Set the weeks
step = 20
weeks = []
max_weeks = 0
total_observations = 0
for c in countries:
df = generate_dataframe(c, args.data)
# Plot the data
sns.lineplot(data=df["pagecounts"], label=get_label(c), legend=False)
# Set the weeks we need to plot
if max_weeks < len(df["week"]):
weeks=[]
counter=0
for e in df["week"].to_list():
if counter%step == 0:
weeks.append(e)
counter += 1
max_weeks = len(df["week"])
# Set the max number of observations
total_observations = len(df["pagecounts"]) if len(df["pagecounts"]) > total_observations else total_observations
# Print the xticks
plt.xticks(np.arange(0, total_observations, step=step), weeks, rotation=90)
# Shrink current axis by 20%
if args.legend:
ax = fig.axes[0]
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
#plt.show()
plt.savefig("pageviews-numerosity-{}.png".format(args.data), dpi=300, bbox_inches='tight')
| mit |
hrjn/scikit-learn | sklearn/model_selection/_split.py | 7 | 68700 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..utils.random import choice
from ..base import _pprint
__all__ = ['BaseCrossValidator',
'KFold',
'GroupKFold',
'LeaveOneGroupOut',
'LeaveOneOut',
'LeavePGroupsOut',
'LeavePOut',
'RepeatedStratifiedKFold',
'RepeatedKFold',
'ShuffleSplit',
'GroupShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
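# Subclasses typically only need to provide ``_iter_test_indices`` and
# ``get_n_splits``; a minimal, purely illustrative splitter built on this
# base class could look like:
#
#     class EvenOddSplit(BaseCrossValidator):
#         """Use even-indexed samples as one test set, odd-indexed as the other."""
#         def _iter_test_indices(self, X, y=None, groups=None):
#             n_samples = _num_samples(X)
#             yield np.arange(0, n_samples, 2)
#             yield np.arange(1, n_samples, 2)
#         def get_n_splits(self, X=None, y=None, groups=None):
#             return 2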
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_splits, type(n_splits)))
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
("Cannot have number of splits n_splits={0} greater"
" than the number of samples: {1}.").format(self.n_splits,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
Takes group information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold: K-fold iterator variant with non-overlapping groups.
RepeatedKFold: Repeats K-Fold n times.
"""
def __init__(self, n_splits=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_splits, shuffle, random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int)
fold_sizes[:n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
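# The fold-size arithmetic above, spelled out for n_samples=10 and n_splits=3:
# fold_sizes starts as [3, 3, 3] and the first 10 % 3 == 1 fold receives one
# extra sample, giving folds of size [4, 3, 3] and (with shuffle=False) test
# index ranges [0:4], [4:7] and [7:10].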
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct groups is approximately the same in each fold.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> groups = np.array([0, 0, 2, 2])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for train_index, test_index in group_kfold.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_splits=3):
super(GroupKFold, self).__init__(n_splits, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d."
% (self.n_splits, n_groups))
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
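# A worked example of the greedy assignment above: with four groups of sizes
# [5, 3, 3, 2] and n_splits=2, the groups are visited from largest to smallest
# and always dropped into the currently lightest fold, giving fold weights
# 5 -> [5, 0], 3 -> [5, 3], 3 -> [5, 6] and 2 -> [7, 6], i.e. folds of
# 7 and 6 samples.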
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size ``trunc(n_samples / n_splits)``, the last one has
the complementary.
See also
--------
RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_splits, shuffle, random_state)
def _make_test_folds(self, X, y=None, groups=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = bincount(y_inversed)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("All the n_groups for individual classes"
" are less than n_splits=%d."
% (self.n_splits))
if self.n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of groups for any class cannot"
" be less than n_splits=%d."
% (min_groups, self.n_splits)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_splits)) as data to the KFold
per_cls_cvs = [
KFold(self.n_splits, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_splits)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedKFold, self).split(X, y, groups)
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
    In each split, test indices must be higher than in the previous split,
    and thus shuffling in the cross-validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of splits. Must be at least 1.
Examples
--------
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> tscv = TimeSeriesSplit(n_splits=3)
>>> print(tscv) # doctest: +NORMALIZE_WHITESPACE
TimeSeriesSplit(n_splits=3)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i``th split,
with a test set of size ``n_samples//(n_splits + 1)``,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=3):
super(TimeSeriesSplit, self).__init__(n_splits,
shuffle=False,
random_state=None)
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
if n_folds > n_samples:
raise ValueError(
("Cannot have number of folds ={0} greater"
" than the number of samples: {1}.").format(n_folds,
n_samples))
indices = np.arange(n_samples)
test_size = (n_samples // n_folds)
test_starts = range(test_size + n_samples % n_folds,
n_samples, test_size)
for test_start in test_starts:
yield (indices[:test_start],
indices[test_start:test_start + test_size])
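# The index arithmetic above, spelled out for n_samples=6 and n_splits=3:
# n_folds=4, test_size = 6 // 4 = 1 and test_starts = range(3, 6), so the
# splits are ([0 1 2], [3]), ([0 1 2 3], [4]) and ([0 1 2 3 4], [5]); the
# remainder 6 % 4 == 2 is absorbed by the first training set.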
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> logo = LeaveOneGroupOut()
>>> logo.get_n_splits(X, y, groups)
2
>>> print(logo)
LeaveOneGroupOut()
>>> for train_index, test_index in logo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
# We make a copy of groups to avoid side-effects during iteration
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if len(unique_groups) <= 1:
raise ValueError(
"The groups parameter contains fewer than 2 unique groups "
"(%s). LeaveOneGroupOut expects at least 2." % unique_groups)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
return len(np.unique(groups))
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and LeaveOneGroupOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the groups while the latter uses samples
all assigned the same groups.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpgo = LeavePGroupsOut(n_groups=2)
>>> lpgo.get_n_splits(X, y, groups)
3
>>> print(lpgo)
LeavePGroupsOut(n_groups=2)
>>> for train_index, test_index in lpgo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if self.n_groups >= len(unique_groups):
raise ValueError(
"The groups parameter contains fewer than (or equal to) "
"n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
"expects that at least n_groups + 1 (%d) unique groups be "
"present" % (self.n_groups, unique_groups, self.n_groups + 1))
combi = combinations(range(len(unique_groups)), self.n_groups)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_groups[np.array(indices)]:
test_index[groups == l] = True
yield test_index
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
X, y, groups = indexable(X, y, groups)
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
class _RepeatedSplits(with_metaclass(ABCMeta)):
"""Repeated splits for an arbitrary randomized CV splitter.
Repeats splits for cross-validators n times with different randomization
in each repetition.
Parameters
----------
cv : callable
Cross-validator class.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
**cvargs : additional params
Constructor parameters for cv. Must not contain random_state
and shuffle.
"""
def __init__(self, cv, n_repeats=10, random_state=None, **cvargs):
if not isinstance(n_repeats, (np.integer, numbers.Integral)):
raise ValueError("Number of repetitions must be of Integral type.")
if n_repeats <= 1:
raise ValueError("Number of repetitions must be greater than 1.")
if any(key in cvargs for key in ('random_state', 'shuffle')):
raise ValueError(
"cvargs must not contain random_state or shuffle.")
self.cv = cv
self.n_repeats = n_repeats
self.random_state = random_state
self.cvargs = cvargs
def split(self, X, y=None, groups=None):
"""Generates indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
n_repeats = self.n_repeats
rng = check_random_state(self.random_state)
for idx in range(n_repeats):
cv = self.cv(random_state=rng, shuffle=True,
**self.cvargs)
for train_index, test_index in cv.split(X, y, groups):
yield train_index, test_index
class RepeatedKFold(_RepeatedSplits):
"""Repeated K-Fold cross validator.
Repeats K-Fold n times with different randomization in each repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
Examples
--------
>>> from sklearn.model_selection import RepeatedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124)
>>> for train_index, test_index in rkf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [0 1] TEST: [2 3]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
See also
--------
    RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
super(RepeatedKFold, self).__init__(
KFold, n_repeats, random_state, n_splits=n_splits)
class RepeatedStratifiedKFold(_RepeatedSplits):
"""Repeated Stratified K-Fold cross validator.
Repeats Stratified K-Fold n times with different randomization in each
repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
Examples
--------
>>> from sklearn.model_selection import RepeatedStratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2,
... random_state=36851234)
>>> for train_index, test_index in rskf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
See also
--------
RepeatedKFold: Repeats K-Fold n times.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
super(RepeatedStratifiedKFold, self).__init__(
StratifiedKFold, n_repeats, random_state, n_splits=n_splits)
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_splits=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
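# For the docstring example above (n_samples=4, test_size=0.25,
# train_size=None), _validate_shuffle_split resolves the sizes to n_test=1
# and n_train=3 (fractional test sizes are rounded up, train sizes down), so
# each iteration is one random permutation cut into a 1-sample test set and
# a 3-sample training set.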
class GroupShuffleSplit(ShuffleSplit):
'''Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
``LeavePGroupsOut(p=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to groups, and
not to samples, as in ShuffleSplit.
Parameters
----------
n_splits : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the test split. If
int, represents the absolute number of test groups. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, n_splits=5, test_size=0.2, train_size=None,
random_state=None):
super(GroupShuffleSplit, self).__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super(
GroupShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
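# Editor's addition: a hedged usage sketch for GroupShuffleSplit above. The
# helper is illustration only; it is not part of the original module, is never
# called at import time, and its name is made up for the example.
def _group_shuffle_split_demo():
    """Held-out samples always come from held-out groups."""
    X = np.arange(8).reshape(4, 2)
    y = np.array([0, 1, 0, 1])
    groups = np.array(['a', 'a', 'b', 'b'])
    gss = GroupShuffleSplit(n_splits=2, test_size=0.5, random_state=0)
    for train_idx, test_idx in gss.split(X, y, groups):
        # every group that appears in the test fold is absent from the train fold
        assert set(groups[train_idx]).isdisjoint(groups[test_idx])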
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
Examples
--------
>>> from sklearn.model_selection._split import _approximate_mode
>>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
array([2, 1])
>>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
array([3, 1])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=0)
array([0, 1, 1, 0])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=42)
array([1, 1, 0, 0])
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = choice(inds, size=add_now, replace=False, random_state=rng)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_splits=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_splits, test_size, train_size, random_state)
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
y = check_array(y, ensure_2d=False, dtype=None)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedShuffleSplit, self).split(X, y, groups)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
    NOTE: This does not take into account the number of samples, which is
    known only at split time.
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
    Validation helper to check that the train and test sizes are meaningful
    with respect to the size of the data (n_samples)
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i' and
test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i' and
train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
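# Editor's addition: a hedged worked example of the size resolution above.
# The helper is illustration only (not part of the original module, never
# called); it just pins down the arithmetic on one concrete case.
def _shuffle_split_sizes_demo():
    """n_samples=10 with test_size=0.25 and train_size=None gives (7, 3):
    a float test_size is a fraction rounded up with ceil, and a missing
    train_size is filled in with the complement."""
    assert _validate_shuffle_split(10, 0.25, None) == (7, 3)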
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = list(cv)
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv)
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
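# Editor's addition: a hedged sketch of the dispatch above. The helper is
# illustration only (not part of the original module, never called). An
# integer cv with a classification target yields StratifiedKFold, otherwise
# KFold, and an explicit iterable of (train, test) pairs is wrapped as-is.
def _check_cv_demo():
    y_class = np.array([0, 1, 0, 1, 0, 1])
    assert isinstance(check_cv(3, y_class, classifier=True), StratifiedKFold)
    assert isinstance(check_cv(3, y_class, classifier=False), KFold)
    custom = [(np.array([0, 1]), np.array([2])),
              (np.array([0, 2]), np.array([1]))]
    assert isinstance(check_cv(custom), _CVIterableWrapper)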
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(ShuffleSplit().split(X, y))`` and application to input data
into a single call for splitting (and optionally subsampling) data in a
    one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the class labels.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
| bsd-3-clause |
Carrotsmile/CS428 | steerstats/tools/plotting/animating/anim_scatter.py | 8 | 3367 | """
Matplotlib Animation Example
author: Jake Vanderplas
email: [email protected]
website: http://jakevdp.github.com
license: BSD
Please feel free to use and modify this, but keep the above information. Thanks!
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import random
import sys
import csv
filename = sys.argv[1]
data = []
for i in range(1, int(sys.argv[2])):
tmp_filename = filename + str(i) + ".csv"
csvfile = open(tmp_filename, 'r')
spamreader = csv.reader(csvfile, delimiter=',')
tmp_data = []
for row in spamreader:
tmp_data.append([float(row[0]), float(row[1]), float(row[2])])
# tmp_data.append([float(row[0]), float(row[1]), float(row[2])])
data.append(tmp_data)
data = np.array(data)
up = 2
low = 0
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
ax = fig.add_subplot(231)
ax.set_xlabel('Efficency Metric')
ax.set_ylabel('PLE Metric')
ax.set_xlim([0.9,1])
ax.set_ylim([0.1,0.9])
ax2 = fig.add_subplot(232)
ax2.set_xlabel('PLE Metric')
ax2.set_ylabel('Entropy Metric')
ax2.set_ylim([1,4])
ax2.set_xlim([0,1])
ax3 = fig.add_subplot(233)
ax3.set_xlabel('Efficency Metric')
ax3.set_ylabel('Entropy Metric')
ax3.set_ylim([1.5,3])
ax3.set_xlim([0.9,1])
ax4 = fig.add_subplot(212, projection='3d')
ax4.set_xlabel('Efficency Metric')
ax4.set_ylabel('PLE Metric')
ax4.set_zlabel('Entropy Metric')
ax4.set_xlim([0.9,1])
ax4.set_ylim([0.1,0.8])
ax4.set_zlim([1.0,4])
# ax = plt.axes(xlim=(low, up), ylim=(low, up))
# ax = plt.axes(xlim=(0.9, 1.0), ylim=(0, 1))
scat1 = ax.scatter([3], [4], c="b")
scat2 = ax2.scatter([3], [4], c="b")
scat3 = ax3.scatter([3], [4], c="b")
scat4 = ax4.scatter([3, 4], [4, 5], [5, 6], c="b")
# initialization function: plot the background of each frame
def init():
    print("paths")
# print scat.get_paths()
# sys.exit()
# scat.set_paths(matplotlib.path.Path([[2, 3]]))
return scat1, scat2, scat3, scat4
# animation function. This is called sequentially
def animate(i):
tmp_data=data[i]
# print tmp_data[:, 1:3]
scat1.set_offsets(tmp_data[:, :2])
scat2.set_offsets(tmp_data[:, 1:3])
scat3.set_offsets(tmp_data[:, [0, 2]])
# scat4.set_offsets(tmp_data)
    print(scat4._offsets3d)
# scat4._offsets3d = (np.ma.ravel(tmp_data[:, 0]), np.ma.ravel(tmp_data[:, 1]), np.ma.ravel(tmp_data[:, 2]))
scat4._offsets3d = (tuple(tmp_data[:, 0]), tuple(tmp_data[:, 1]), (tmp_data[:, 2]))
# scat4._offsets3d = tmp_data
# scat4.set_offsets(tmp_data[:,:2])
# scat4.set_3d_properties(tmp_data[:,2],'z')
plt.draw()
return scat1, scat2, scat3, scat4
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=200, interval=100, blit=True)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
# anim.save('basic_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
plt.show() | gpl-3.0 |
dgwakeman/mne-python | mne/viz/circle.py | 13 | 15446 | """Functions to plot on circle as for connectivity
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
#
# License: Simplified BSD
from itertools import cycle
from functools import partial
import numpy as np
from ..externals.six import string_types
from ..fixes import tril_indices, normalize_colors
def circular_layout(node_names, node_order, start_pos=90, start_between=True,
group_boundaries=None, group_sep=10):
"""Create layout arranging nodes on a circle.
Parameters
----------
node_names : list of str
Node names.
node_order : list of str
List with node names defining the order in which the nodes are
arranged. Must have the elements as node_names but the order can be
different. The nodes are arranged clockwise starting at "start_pos"
degrees.
start_pos : float
Angle in degrees that defines where the first node is plotted.
start_between : bool
If True, the layout starts with the position between the nodes. This is
the same as adding "180. / len(node_names)" to start_pos.
group_boundaries : None | array-like
        List of boundaries between groups at which point a "group_sep" will
be inserted. E.g. "[0, len(node_names) / 2]" will create two groups.
group_sep : float
Group separation angle in degrees. See "group_boundaries".
Returns
-------
    node_angles : array, shape=(len(node_names),)
Node angles in degrees.
"""
n_nodes = len(node_names)
if len(node_order) != n_nodes:
raise ValueError('node_order has to be the same length as node_names')
if group_boundaries is not None:
boundaries = np.array(group_boundaries, dtype=np.int)
if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
raise ValueError('"group_boundaries" has to be between 0 and '
'n_nodes - 1.')
if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
            raise ValueError('"group_boundaries" must have strictly '
                             'increasing values.')
n_group_sep = len(group_boundaries)
else:
n_group_sep = 0
boundaries = None
# convert it to a list with indices
node_order = [node_order.index(name) for name in node_names]
node_order = np.array(node_order)
if len(np.unique(node_order)) != n_nodes:
raise ValueError('node_order has repeated entries')
node_sep = (360. - n_group_sep * group_sep) / n_nodes
if start_between:
start_pos += node_sep / 2
if boundaries is not None and boundaries[0] == 0:
# special case when a group separator is at the start
start_pos += group_sep / 2
boundaries = boundaries[1:] if n_group_sep > 1 else None
node_angles = np.ones(n_nodes, dtype=np.float) * node_sep
node_angles[0] = start_pos
if boundaries is not None:
node_angles[boundaries] += group_sep
node_angles = np.cumsum(node_angles)[node_order]
return node_angles
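# Editor's addition: a hedged usage sketch for circular_layout. The helper is
# illustration only (not part of the original module, never called).
def _circular_layout_demo():
    """Four nodes in two groups: one angle per node is returned (in degrees),
    and a "group_sep" gap is inserted at each group boundary."""
    names = ['A', 'B', 'C', 'D']
    angles = circular_layout(names, names, group_boundaries=[0, 2])
    assert angles.shape == (4,)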
def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
n_nodes=0, node_angles=None,
ylim=[9, 10]):
"""Isolates connections around a single node when user left clicks a node.
On right click, resets all connections."""
if event.inaxes != axes:
return
if event.button == 1: # left click
# click must be near node radius
if not ylim[0] <= event.ydata <= ylim[1]:
return
# all angles in range [0, 2*pi]
node_angles = node_angles % (np.pi * 2)
node = np.argmin(np.abs(event.xdata - node_angles))
patches = event.inaxes.patches
for ii, (x, y) in enumerate(zip(indices[0], indices[1])):
patches[ii].set_visible(node in [x, y])
fig.canvas.draw()
elif event.button == 3: # right click
patches = event.inaxes.patches
for ii in range(np.size(indices, axis=1)):
patches[ii].set_visible(True)
fig.canvas.draw()
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
node_angles=None, node_width=None,
node_colors=None, facecolor='black',
textcolor='white', node_edgecolor='black',
linewidth=1.5, colormap='hot', vmin=None,
vmax=None, colorbar=True, title=None,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
fontsize_title=12, fontsize_names=8,
fontsize_colorbar=8, padding=6.,
fig=None, subplot=111, interactive=True,
node_linewidth=2., show=True):
"""Visualize connectivity as a circular graph.
Note: This code is based on the circle graph example by Nicolas P. Rougier
http://www.labri.fr/perso/nrougier/coding/.
Parameters
----------
con : array
Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
array is provided, "indices" has to be used to define the connection
indices.
node_names : list of str
Node names. The order corresponds to the order in con.
indices : tuple of arrays | None
Two arrays with indices of connections for which the connections
        strengths are defined in con. Only needed if con is a 1D array.
n_lines : int | None
If not None, only the n_lines strongest connections (strength=abs(con))
are drawn.
    node_angles : array, shape=(len(node_names),) | None
Array with node positions in degrees. If None, the nodes are equally
spaced on the circle. See mne.viz.circular_layout.
node_width : float | None
Width of each node in degrees. If None, the minimum angle between any
two nodes is used as the width.
node_colors : list of tuples | list of str
List with the color to use for each node. If fewer colors than nodes
are provided, the colors will be repeated. Any color supported by
matplotlib can be used, e.g., RGBA tuples, named colors.
facecolor : str
Color to use for background. See matplotlib.colors.
textcolor : str
Color to use for text. See matplotlib.colors.
node_edgecolor : str
Color to use for lines around nodes. See matplotlib.colors.
linewidth : float
Line width to use for connections.
colormap : str
Colormap to use for coloring the connections.
vmin : float | None
Minimum value for colormap. If None, it is determined automatically.
vmax : float | None
Maximum value for colormap. If None, it is determined automatically.
colorbar : bool
Display a colorbar or not.
title : str
The figure title.
colorbar_size : float
Size of the colorbar.
colorbar_pos : 2-tuple
Position of the colorbar.
fontsize_title : int
Font size to use for title.
fontsize_names : int
Font size to use for node names.
fontsize_colorbar : int
Font size to use for colorbar.
padding : float
Space to add around figure to accommodate long labels.
fig : None | instance of matplotlib.pyplot.Figure
The figure to use. If None, a new figure with the specified background
color will be created.
subplot : int | 3-tuple
Location of the subplot when creating figures with multiple plots. E.g.
121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
matplotlib.pyplot.subplot.
interactive : bool
When enabled, left-click on a node to show only connections to that
node. Right-click shows all connections.
node_linewidth : float
        Line width for nodes.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.pyplot.Figure
The figure handle.
axes : instance of matplotlib.axes.PolarAxesSubplot
The subplot handle.
"""
import matplotlib.pyplot as plt
import matplotlib.path as m_path
import matplotlib.patches as m_patches
n_nodes = len(node_names)
if node_angles is not None:
if len(node_angles) != n_nodes:
raise ValueError('node_angles has to be the same length '
'as node_names')
# convert it to radians
node_angles = node_angles * np.pi / 180
else:
# uniform layout on unit circle
node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
if node_width is None:
# widths correspond to the minimum angle between two nodes
dist_mat = node_angles[None, :] - node_angles[:, None]
dist_mat[np.diag_indices(n_nodes)] = 1e9
node_width = np.min(np.abs(dist_mat))
else:
node_width = node_width * np.pi / 180
if node_colors is not None:
if len(node_colors) < n_nodes:
node_colors = cycle(node_colors)
else:
# assign colors using colormap
node_colors = [plt.cm.spectral(i / float(n_nodes))
for i in range(n_nodes)]
# handle 1D and 2D connectivity information
if con.ndim == 1:
if indices is None:
raise ValueError('indices has to be provided if con.ndim == 1')
elif con.ndim == 2:
if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
raise ValueError('con has to be 1D or a square matrix')
# we use the lower-triangular part
indices = tril_indices(n_nodes, -1)
con = con[indices]
else:
raise ValueError('con has to be 1D or a square matrix')
# get the colormap
if isinstance(colormap, string_types):
colormap = plt.get_cmap(colormap)
# Make figure background the same colors as axes
if fig is None:
fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
# Use a polar axes
if not isinstance(subplot, tuple):
subplot = (subplot,)
axes = plt.subplot(*subplot, polar=True, axisbg=facecolor)
# No ticks, we'll put our own
plt.xticks([])
plt.yticks([])
    # Set y axes limit, add additional space if requested
plt.ylim(0, 10 + padding)
# Remove the black axes border which may obscure the labels
axes.spines['polar'].set_visible(False)
# Draw lines between connected nodes, only draw the strongest connections
if n_lines is not None and len(con) > n_lines:
con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
else:
con_thresh = 0.
# get the connections which we are drawing and sort by connection strength
# this will allow us to draw the strongest connections first
con_abs = np.abs(con)
con_draw_idx = np.where(con_abs >= con_thresh)[0]
con = con[con_draw_idx]
con_abs = con_abs[con_draw_idx]
indices = [ind[con_draw_idx] for ind in indices]
# now sort them
sort_idx = np.argsort(con_abs)
con_abs = con_abs[sort_idx]
con = con[sort_idx]
indices = [ind[sort_idx] for ind in indices]
# Get vmin vmax for color scaling
if vmin is None:
vmin = np.min(con[np.abs(con) >= con_thresh])
if vmax is None:
vmax = np.max(con)
vrange = vmax - vmin
# We want to add some "noise" to the start and end position of the
# edges: We modulate the noise with the number of connections of the
# node and the connection strength, such that the strongest connections
# are closer to the node center
nodes_n_con = np.zeros((n_nodes), dtype=np.int)
for i, j in zip(indices[0], indices[1]):
nodes_n_con[i] += 1
nodes_n_con[j] += 1
    # initialize random number generator so plot is reproducible
rng = np.random.mtrand.RandomState(seed=0)
n_con = len(indices[0])
noise_max = 0.25 * node_width
start_noise = rng.uniform(-noise_max, noise_max, n_con)
end_noise = rng.uniform(-noise_max, noise_max, n_con)
nodes_n_con_seen = np.zeros_like(nodes_n_con)
for i, (start, end) in enumerate(zip(indices[0], indices[1])):
nodes_n_con_seen[start] += 1
nodes_n_con_seen[end] += 1
start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
float(nodes_n_con[start]))
end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
float(nodes_n_con[end]))
# scale connectivity for colormap (vmin<=>0, vmax<=>1)
con_val_scaled = (con - vmin) / vrange
# Finally, we draw the connections
for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
# Start point
t0, r0 = node_angles[i], 10
# End point
t1, r1 = node_angles[j], 10
# Some noise in start and end point
t0 += start_noise[pos]
t1 += end_noise[pos]
verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
m_path.Path.LINETO]
path = m_path.Path(verts, codes)
color = colormap(con_val_scaled[pos])
# Actual line
patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
linewidth=linewidth, alpha=1.)
axes.add_patch(patch)
# Draw ring with colored nodes
height = np.ones(n_nodes) * 1.0
bars = axes.bar(node_angles, height, width=node_width, bottom=9,
edgecolor=node_edgecolor, lw=node_linewidth,
facecolor='.9', align='center')
for bar, color in zip(bars, node_colors):
bar.set_facecolor(color)
# Draw node labels
angles_deg = 180 * node_angles / np.pi
for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
if angle_deg >= 270:
ha = 'left'
else:
# Flip the label, so text is always upright
angle_deg += 180
ha = 'right'
axes.text(angle_rad, 10.4, name, size=fontsize_names,
rotation=angle_deg, rotation_mode='anchor',
horizontalalignment=ha, verticalalignment='center',
color=textcolor)
if title is not None:
plt.title(title, color=textcolor, fontsize=fontsize_title,
axes=axes)
if colorbar:
norm = normalize_colors(vmin=vmin, vmax=vmax)
sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm)
sm.set_array(np.linspace(vmin, vmax))
cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
shrink=colorbar_size,
anchor=colorbar_pos)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
cb.ax.tick_params(labelsize=fontsize_colorbar)
plt.setp(cb_yticks, color=textcolor)
# Add callback for interaction
if interactive:
callback = partial(_plot_connectivity_circle_onpick, fig=fig,
axes=axes, indices=indices, n_nodes=n_nodes,
node_angles=node_angles)
fig.canvas.mpl_connect('button_press_event', callback)
if show:
plt.show()
return fig, axes
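# Editor's addition: a hedged usage sketch for plot_connectivity_circle. The
# helper is illustration only (not part of the original module, never called)
# and assumes a working matplotlib backend from the same era as this code.
def _plot_connectivity_circle_demo():
    """Plot a small random connectivity matrix without blocking."""
    rng = np.random.RandomState(0)
    con = rng.rand(4, 4)  # square matrix: only the lower triangle is used
    names = ['LH-A', 'LH-B', 'RH-A', 'RH-B']
    return plot_connectivity_circle(con, names, n_lines=3, show=False)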
| bsd-3-clause |
jeffzheng1/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 56 | 1959 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
def optimizer_exp_decay():
global_step = tf.contrib.framework.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
return tf.train.AdagradOptimizer(learning_rate=learning_rate)
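# Editor's addition: a hedged sketch of the schedule built above. It mirrors
# tf.train.exponential_decay with staircase=False instead of calling
# TensorFlow, and is illustration only (nothing in the example invokes it).
def _decayed_learning_rate_demo(step, lr=0.1, decay_steps=100, decay_rate=0.001):
  """Continuous exponential decay: lr * decay_rate ** (step / decay_steps)."""
  return lr * decay_rate ** (step / float(decay_steps))
# e.g. the rate falls from 0.1 at step 0 to roughly 1e-4 at step 100.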
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
optimizer=optimizer_exp_decay)
classifier.fit(x_train, y_train, steps=800)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
jdmcbr/blaze | blaze/expr/reductions.py | 10 | 8915 | from __future__ import absolute_import, division, print_function
import datashape
from datashape import Record, DataShape, dshape, TimeDelta
from datashape import coretypes as ct
from datashape.predicates import iscollection, isboolean, isnumeric, isdatelike
from numpy import inf
from odo.utils import copydoc
import toolz
from .core import common_subexpression
from .expressions import Expr, ndim
from .strings import isstring
from .expressions import dshape_method_list, method_properties
class Reduction(Expr):
""" A column-wise reduction
Blaze supports the same class of reductions as NumPy and Pandas.
sum, min, max, any, all, mean, var, std, count, nunique
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = t['amount'].sum()
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 3]]
>>> from blaze.compute.python import compute
>>> compute(e, data)
350
"""
__slots__ = '_hash', '_child', 'axis', 'keepdims'
def __init__(self, _child, axis=None, keepdims=False):
self._child = _child
if axis is None:
axis = tuple(range(_child.ndim))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
axis = tuple(sorted(axis))
self.axis = axis
self.keepdims = keepdims
@property
def dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in axis)
return DataShape(*(shape + (self.schema,)))
@property
def schema(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
result = toolz.first(schema.types)
else:
result = schema
return DataShape(result)
@property
def symbol(self):
return type(self).__name__
@property
def _name(self):
child_name = self._child._name
if child_name is None or child_name == '_':
return type(self).__name__
else:
return '%s_%s' % (child_name, type(self).__name__)
def __str__(self):
kwargs = list()
if self.keepdims:
kwargs.append('keepdims=True')
if self.axis != tuple(range(self._child.ndim)):
kwargs.append('axis=' + str(self.axis))
other = sorted(
set(self.__slots__[1:]) - set(['_child', 'axis', 'keepdims']))
for slot in other:
kwargs.append('%s=%s' % (slot, getattr(self, slot)))
name = type(self).__name__
if kwargs:
return '%s(%s, %s)' % (name, self._child, ', '.join(kwargs))
else:
return '%s(%s)' % (name, self._child)
class any(Reduction):
schema = dshape(ct.bool_)
class all(Reduction):
schema = dshape(ct.bool_)
class sum(Reduction):
@property
def schema(self):
return DataShape(datashape.maxtype(super(sum, self).schema))
class max(Reduction):
pass
class min(Reduction):
pass
class mean(Reduction):
schema = dshape(ct.real)
class var(Reduction):
"""Variance
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute an unbiased estimate of the population variance if this is
``True``. In NumPy and pandas, this parameter is called ``ddof`` (delta
degrees of freedom) and is equal to 1 for unbiased and 0 for biased.
"""
__slots__ = '_hash', '_child', 'unbiased', 'axis', 'keepdims'
schema = dshape(ct.real)
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
super(var, self).__init__(child, *args, **kwargs)
class std(Reduction):
"""Standard Deviation
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute the square root of an unbiased estimate of the population
variance if this is ``True``.
.. warning::
This does *not* return an unbiased estimate of the population
standard deviation.
See Also
--------
var
"""
__slots__ = '_hash', '_child', 'unbiased', 'axis', 'keepdims'
schema = dshape(ct.real)
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
super(std, self).__init__(child, *args, **kwargs)
class count(Reduction):
""" The number of non-null elements """
schema = dshape(ct.int32)
class nunique(Reduction):
schema = dshape(ct.int32)
class nelements(Reduction):
"""Compute the number of elements in a collection, including missing values.
See Also
    --------
blaze.expr.reductions.count: compute the number of non-null elements
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: float64}')
>>> t[t.amount < 1].nelements()
nelements(t[t.amount < 1])
"""
schema = dshape(ct.int32)
def nrows(expr):
return nelements(expr, axis=(0,))
class Summary(Expr):
""" A collection of named reductions
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = summary(number=t.id.nunique(), sum=t.amount.sum())
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 1]]
>>> from blaze import compute
>>> compute(expr, data)
(2, 350)
"""
__slots__ = '_hash', '_child', 'names', 'values', 'axis', 'keepdims'
def __init__(self, _child, names, values, axis=None, keepdims=False):
self._child = _child
self.names = names
self.values = values
self.keepdims = keepdims
self.axis = axis
@property
def dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in axis)
measure = Record(list(zip(self.names,
[v.schema for v in self.values])))
return DataShape(*(shape + (measure,)))
def __str__(self):
s = 'summary('
s += ', '.join('%s=%s' % (name, str(val))
for name, val in zip(self.fields, self.values))
if self.keepdims:
s += ', keepdims=True'
s += ')'
return s
@copydoc(Summary)
def summary(keepdims=False, axis=None, **kwargs):
items = sorted(kwargs.items(), key=toolz.first)
names = tuple(map(toolz.first, items))
values = tuple(map(toolz.second, items))
child = common_subexpression(*values)
if len(kwargs) == 1 and not iscollection(child.dshape):
while not iscollection(child.dshape):
children = [i for i in child._inputs if isinstance(i, Expr)]
if len(children) == 1:
child = children[0]
else:
child = common_subexpression(*children)
if axis is None:
axis = tuple(range(ndim(child)))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
return Summary(child, names, values, keepdims=keepdims, axis=axis)
def vnorm(expr, ord=None, axis=None, keepdims=False):
""" Vector norm
See np.linalg.norm
"""
if ord is None or ord == 'fro':
ord = 2
if ord == inf:
return max(abs(expr), axis=axis, keepdims=keepdims)
elif ord == -inf:
return min(abs(expr), axis=axis, keepdims=keepdims)
elif ord == 1:
return sum(abs(expr), axis=axis, keepdims=keepdims)
elif ord % 2 == 0:
return sum(expr ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
return sum(abs(expr) ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
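# Editor's addition: a hedged usage sketch for vnorm. The helper is
# illustration only (not part of the original module, never called); the
# import is deferred so it cannot introduce a circular import here.
def _vnorm_demo():
    """ord=1 builds sum(abs(x)), ord=2 (the default) the Euclidean norm, and
    ord=+/-inf reduce to max/min of abs(x), following np.linalg.norm."""
    from blaze import symbol
    x = symbol('x', '5 * float64')
    return vnorm(x, ord=1), vnorm(x), vnorm(x, ord=inf)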
dshape_method_list.extend([
(iscollection, set([count, nelements])),
(lambda ds: (iscollection(ds) and
(isstring(ds) or isnumeric(ds) or isboolean(ds) or
isdatelike(ds) or isinstance(ds, TimeDelta))),
set([min, max])),
(lambda ds: len(ds.shape) == 1,
set([nrows, nunique])),
(lambda ds: iscollection(ds) and isboolean(ds),
set([any, all])),
(lambda ds: iscollection(ds) and (isnumeric(ds) or isboolean(ds)),
set([mean, sum, std, var, vnorm])),
])
method_properties.update([nrows])
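# Editor's addition: a hedged note on the registrations above. They are what
# expose these reductions as methods on expressions whose dshape satisfies the
# paired predicate. The helper is illustration only (not part of the original
# module, never called); the import is deferred to avoid a circular import.
def _reduction_methods_demo():
    from blaze import symbol
    t = symbol('t', 'var * {amount: float64}')
    # a numeric 1-d column matches the predicates for mean/sum/... and nunique
    return t.amount.mean(), t.amount.nunique()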
| bsd-3-clause |
CforED/Machine-Learning | sklearn/cluster/tests/test_k_means.py | 41 | 27789 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.exceptions import DataConversionWarning
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
    # check that the number of cluster centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
    # check that an error is raised if the precompute_distances flag is
    # given an unsupported value
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_explicit_init_shape():
# test for sensible errors when giving explicit init
# with wrong number of features or clusters
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 3))
for Class in [KMeans, MiniBatchKMeans]:
# mismatch of number of features
km = Class(n_init=1, init=X[:, :2], n_clusters=len(X))
msg = "does not match the number of features of the data"
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1, init=lambda X_, k, random_state: X_[:, :2], n_clusters=len(X))
assert_raises_regex(ValueError, msg, km.fit, X)
# mismatch of number of clusters
msg = "does not match the number of clusters"
km = Class(n_init=1, init=X[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1, init=lambda X_, k, random_state: X_[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
def test_k_means_fortran_aligned_data():
    # Check that KMeans will work well, even if X is a fortran-aligned array.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42, n_clusters=2)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio;
# as a result, all the centers should be reassigned and the model
# should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization with a small reassignment_ratio;
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
# that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
msg = "does not match the number of clusters"
assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init,
random_state=42).fit,
X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check that my_X has been restored (de-centered back to the original values)
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check that k_means with a bad initialization does not yield a singleton.
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
# would lead to collapsed centers, which in turn would make the clustering
# dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_predict_equal_labels():
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1)
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
def test_n_init():
# Check that increasing n_init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
# too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
def test_max_iter_error():
km = KMeans(max_iter=-1)
assert_raise_message(ValueError, 'Number of iterations should be', km.fit, X)
| bsd-3-clause |
zzcclp/spark | python/pyspark/pandas/series.py | 9 | 197528 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark Column to behave similar to pandas Series.
"""
import datetime
import re
import inspect
import sys
from collections.abc import Mapping
from functools import partial, wraps, reduce
from typing import (
Any,
Callable,
Dict,
Generic,
IO,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
no_type_check,
overload,
TYPE_CHECKING,
)
import numpy as np
import pandas as pd
from pandas.core.accessor import CachedAccessor
from pandas.io.formats.printing import pprint_thing
from pandas.api.types import is_list_like, is_hashable
from pandas.api.extensions import ExtensionDtype
from pandas.tseries.frequencies import DateOffset
from pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame
from pyspark.sql.types import (
ArrayType,
BooleanType,
DataType,
DoubleType,
FloatType,
IntegerType,
IntegralType,
LongType,
NumericType,
Row,
StructType,
)
from pyspark.sql.window import Window
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, Dtype, Label, Name, Scalar, T
from pyspark.pandas.accessors import PandasOnSparkSeriesMethods
from pyspark.pandas.categorical import CategoricalAccessor
from pyspark.pandas.config import get_option
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.exceptions import SparkPandasIndexingError
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.generic import Frame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
DEFAULT_SERIES_NAME,
NATURAL_ORDER_COLUMN_NAME,
SPARK_DEFAULT_INDEX_NAME,
SPARK_DEFAULT_SERIES_NAME,
)
from pyspark.pandas.missing.series import MissingPandasLikeSeries
from pyspark.pandas.plot import PandasOnSparkPlotAccessor
from pyspark.pandas.ml import corr
from pyspark.pandas.utils import (
combine_frames,
is_name_like_tuple,
is_name_like_value,
name_like_string,
same_anchor,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
validate_bool_kwarg,
verify_temp_column_name,
SPARK_CONF_ARROW_ENABLED,
)
from pyspark.pandas.datetimes import DatetimeMethods
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.spark.accessors import SparkSeriesMethods
from pyspark.pandas.strings import StringMethods
from pyspark.pandas.typedef import (
infer_return_type,
spark_type_to_pandas_dtype,
ScalarType,
SeriesType,
)
if TYPE_CHECKING:
from pyspark.sql._typing import ColumnOrName # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import SeriesGroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
# This regular expression pattern is compiled and defined here to avoid compiling the same
# pattern every time it is used in __repr__ in Series.
# This pattern basically seeks the footer string from pandas'
REPR_PATTERN = re.compile(r"Length: (?P<length>[0-9]+)")
_flex_doc_SERIES = """
Return {desc} of series and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``
Parameters
----------
other : Series or scalar value
Returns
-------
Series
The result of the operation.
See Also
--------
Series.{reverse}
{series_examples}
"""
_add_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.add(df.b)
a 4.0
b NaN
c 6.0
d NaN
dtype: float64
>>> df.a.radd(df.b)
a 4.0
b NaN
c 6.0
d NaN
dtype: float64
"""
_sub_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.subtract(df.b)
a 0.0
b NaN
c 2.0
d NaN
dtype: float64
>>> df.a.rsub(df.b)
a 0.0
b NaN
c -2.0
d NaN
dtype: float64
"""
_mul_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.multiply(df.b)
a 4.0
b NaN
c 8.0
d NaN
dtype: float64
>>> df.a.rmul(df.b)
a 4.0
b NaN
c 8.0
d NaN
dtype: float64
"""
_div_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.divide(df.b)
a 1.0
b NaN
c 2.0
d NaN
dtype: float64
>>> df.a.rdiv(df.b)
a 1.0
b NaN
c 0.5
d NaN
dtype: float64
"""
_pow_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.pow(df.b)
a 4.0
b NaN
c 16.0
d NaN
dtype: float64
>>> df.a.rpow(df.b)
a 4.0
b NaN
c 16.0
d NaN
dtype: float64
"""
_mod_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.mod(df.b)
a 0.0
b NaN
c 0.0
d NaN
dtype: float64
>>> df.a.rmod(df.b)
a 0.0
b NaN
c 2.0
d NaN
dtype: float64
"""
_floordiv_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.floordiv(df.b)
a 1.0
b NaN
c 2.0
d NaN
dtype: float64
>>> df.a.rfloordiv(df.b)
a 1.0
b NaN
c 0.0
d NaN
dtype: float64
"""
# Needed to disambiguate Series.str and str type
str_type = str
def _create_type_for_series_type(param: Any) -> Type[SeriesType]:
from pyspark.pandas.typedef import NameTypeHolder
if isinstance(param, ExtensionDtype):
new_class = type("NameType", (NameTypeHolder,), {}) # type: Type[NameTypeHolder]
new_class.tpe = param
else:
new_class = param.type if isinstance(param, np.dtype) else param
return SeriesType[new_class] # type: ignore
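# Compatibility shim: on Python 3.5/3.6 the old ``typing`` implementation exposes
# ``GenericMeta``, whose ``__getitem__`` is patched below. For classes that define
# ``is_series`` (i.e. ``Series[...]`` subscriptions), the parameter is routed through
# ``_create_type_for_series_type`` so that annotations such as ``ps.Series[int]``
# resolve to a ``SeriesType`` instead of the default generic subscription result.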
if (3, 5) <= sys.version_info < (3, 7) and __name__ != "__main__":
from typing import GenericMeta # type: ignore
old_getitem = GenericMeta.__getitem__ # type: ignore
@no_type_check
def new_getitem(self, params):
if hasattr(self, "is_series"):
return old_getitem(self, _create_type_for_series_type(params))
else:
return old_getitem(self, params)
GenericMeta.__getitem__ = new_getitem # type: ignore
class Series(Frame, IndexOpsMixin, Generic[T]):
"""
pandas-on-Spark Series that corresponds to pandas Series logically. This holds Spark Column
internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: InternalFrame
:ivar _psdf: Parent's pandas-on-Spark DataFrame
:type _psdf: ps.DataFrame
Parameters
----------
data : array-like, dict, or scalar value, pandas Series
Contains data stored in Series
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a pandas Series, other arguments should not be used.
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
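Examples
--------
An illustrative construction sketch (doctest skipped; the printed form follows pandas):
>>> ps.Series([1, 3, 5]) # doctest: +SKIP
0 1
1 3
2 5
dtype: int64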
"""
@no_type_check
def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False):
assert data is not None
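# If `data` is already a pandas-on-Spark DataFrame, this Series is anchored to it and
# `index` is interpreted as the column label to expose; otherwise the input is first
# materialized as a pandas Series and wrapped in a fresh InternalFrame/DataFrame anchor.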
if isinstance(data, DataFrame):
assert dtype is None
assert name is None
assert not copy
assert not fastpath
self._anchor = data # type: DataFrame
self._col_label = index # type: Label
else:
if isinstance(data, pd.Series):
assert index is None
assert dtype is None
assert name is None
assert not copy
assert not fastpath
s = data
else:
s = pd.Series(
data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath
)
internal = InternalFrame.from_pandas(pd.DataFrame(s))
if s.name is None:
internal = internal.copy(column_labels=[None])
anchor = DataFrame(internal)
self._anchor = anchor
self._col_label = anchor._internal.column_labels[0]
object.__setattr__(anchor, "_psseries", {self._column_label: self})
@property
def _psdf(self) -> DataFrame:
return self._anchor
@property
def _internal(self) -> InternalFrame:
return self._psdf._internal.select_column(self._column_label)
@property
def _column_label(self) -> Optional[Label]:
return self._col_label
def _update_anchor(self, psdf: DataFrame) -> None:
assert psdf._internal.column_labels == [self._column_label], (
psdf._internal.column_labels,
[self._column_label],
)
self._anchor = psdf
object.__setattr__(psdf, "_psseries", {self._column_label: self})
def _with_new_scol(self, scol: Column, *, field: Optional[InternalField] = None) -> "Series":
"""
Copy pandas-on-Spark Series with the new Spark Column.
:param scol: the new Spark Column
:return: the copied Series
"""
name = name_like_string(self._column_label)
internal = self._internal.copy(
data_spark_columns=[scol.alias(name)],
data_fields=[
field if field is None or field.struct_field is None else field.copy(name=name)
],
)
return first_series(DataFrame(internal))
spark = CachedAccessor("spark", SparkSeriesMethods)
@property
def dtypes(self) -> Dtype:
"""Return the dtype object of the underlying data.
>>> s = ps.Series(list('abc'))
>>> s.dtype == s.dtypes
True
"""
return self.dtype
@property
def axes(self) -> List["Index"]:
"""
Return a list of the row axis labels.
Examples
--------
>>> psser = ps.Series([1, 2, 3])
>>> psser.axes
[Int64Index([0, 1, 2], dtype='int64')]
"""
return [self.index]
# Arithmetic Operators
def add(self, other: Any) -> "Series":
return self + other
add.__doc__ = _flex_doc_SERIES.format(
desc="Addition",
op_name="+",
equiv="series + other",
reverse="radd",
series_examples=_add_example_SERIES,
)
def radd(self, other: Any) -> "Series":
return other + self
radd.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Addition",
op_name="+",
equiv="other + series",
reverse="add",
series_examples=_add_example_SERIES,
)
def div(self, other: Any) -> "Series":
return self / other
div.__doc__ = _flex_doc_SERIES.format(
desc="Floating division",
op_name="/",
equiv="series / other",
reverse="rdiv",
series_examples=_div_example_SERIES,
)
divide = div
def rdiv(self, other: Any) -> "Series":
return other / self
rdiv.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Floating division",
op_name="/",
equiv="other / series",
reverse="div",
series_examples=_div_example_SERIES,
)
def truediv(self, other: Any) -> "Series":
return self / other
truediv.__doc__ = _flex_doc_SERIES.format(
desc="Floating division",
op_name="/",
equiv="series / other",
reverse="rtruediv",
series_examples=_div_example_SERIES,
)
def rtruediv(self, other: Any) -> "Series":
return other / self
rtruediv.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Floating division",
op_name="/",
equiv="other / series",
reverse="truediv",
series_examples=_div_example_SERIES,
)
def mul(self, other: Any) -> "Series":
return self * other
mul.__doc__ = _flex_doc_SERIES.format(
desc="Multiplication",
op_name="*",
equiv="series * other",
reverse="rmul",
series_examples=_mul_example_SERIES,
)
multiply = mul
def rmul(self, other: Any) -> "Series":
return other * self
rmul.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Multiplication",
op_name="*",
equiv="other * series",
reverse="mul",
series_examples=_mul_example_SERIES,
)
def sub(self, other: Any) -> "Series":
return self - other
sub.__doc__ = _flex_doc_SERIES.format(
desc="Subtraction",
op_name="-",
equiv="series - other",
reverse="rsub",
series_examples=_sub_example_SERIES,
)
subtract = sub
def rsub(self, other: Any) -> "Series":
return other - self
rsub.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Subtraction",
op_name="-",
equiv="other - series",
reverse="sub",
series_examples=_sub_example_SERIES,
)
def mod(self, other: Any) -> "Series":
return self % other
mod.__doc__ = _flex_doc_SERIES.format(
desc="Modulo",
op_name="%",
equiv="series % other",
reverse="rmod",
series_examples=_mod_example_SERIES,
)
def rmod(self, other: Any) -> "Series":
return other % self
rmod.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Modulo",
op_name="%",
equiv="other % series",
reverse="mod",
series_examples=_mod_example_SERIES,
)
def pow(self, other: Any) -> "Series":
return self ** other
pow.__doc__ = _flex_doc_SERIES.format(
desc="Exponential power of series",
op_name="**",
equiv="series ** other",
reverse="rpow",
series_examples=_pow_example_SERIES,
)
def rpow(self, other: Any) -> "Series":
return other ** self
rpow.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Exponential power",
op_name="**",
equiv="other ** series",
reverse="pow",
series_examples=_pow_example_SERIES,
)
def floordiv(self, other: Any) -> "Series":
return self // other
floordiv.__doc__ = _flex_doc_SERIES.format(
desc="Integer division",
op_name="//",
equiv="series // other",
reverse="rfloordiv",
series_examples=_floordiv_example_SERIES,
)
def rfloordiv(self, other: Any) -> "Series":
return other // self
rfloordiv.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Integer division",
op_name="//",
equiv="other // series",
reverse="floordiv",
series_examples=_floordiv_example_SERIES,
)
# create accessor for pandas-on-Spark specific methods.
pandas_on_spark = CachedAccessor("pandas_on_spark", PandasOnSparkSeriesMethods)
# keep the name "koalas" for backward compatibility.
koalas = CachedAccessor("koalas", PandasOnSparkSeriesMethods)
# Comparison Operators
def eq(self, other: Any) -> bool:
"""
Compare if the current value is equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a == 1
a True
b False
c False
d False
Name: a, dtype: bool
>>> df.b.eq(1)
a True
b False
c True
d False
Name: b, dtype: bool
"""
return self == other
equals = eq
def gt(self, other: Any) -> "Series":
"""
Compare if the current value is greater than the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a > 1
a False
b True
c True
d True
Name: a, dtype: bool
>>> df.b.gt(1)
a False
b False
c False
d False
Name: b, dtype: bool
"""
return self > other
def ge(self, other: Any) -> "Series":
"""
Compare if the current value is greater than or equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a >= 2
a False
b True
c True
d True
Name: a, dtype: bool
>>> df.b.ge(2)
a False
b False
c False
d False
Name: b, dtype: bool
"""
return self >= other
def lt(self, other: Any) -> "Series":
"""
Compare if the current value is less than the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a < 1
a False
b False
c False
d False
Name: a, dtype: bool
>>> df.b.lt(2)
a True
b False
c True
d False
Name: b, dtype: bool
"""
return self < other
def le(self, other: Any) -> "Series":
"""
Compare if the current value is less than or equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a <= 2
a True
b True
c False
d False
Name: a, dtype: bool
>>> df.b.le(2)
a True
b False
c True
d False
Name: b, dtype: bool
"""
return self <= other
def ne(self, other: Any) -> "Series":
"""
Compare if the current value is not equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a != 1
a False
b True
c True
d True
Name: a, dtype: bool
>>> df.b.ne(1)
a False
b True
c False
d True
Name: b, dtype: bool
"""
return self != other
def divmod(self, other: Any) -> Tuple["Series", "Series"]:
"""
Return Integer division and modulo of series and other, element-wise
(binary operator `divmod`).
Parameters
----------
other : Series or scalar value
Returns
-------
2-Tuple of Series
The result of the operation.
See Also
--------
Series.rdivmod
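Examples
--------
An illustrative sketch (doctests skipped); ``divmod`` simply pairs ``floordiv`` and ``mod``:
>>> s = ps.Series([7, 8, 9])
>>> quotient, remainder = s.divmod(3) # doctest: +SKIP
>>> quotient.to_list(), remainder.to_list() # doctest: +SKIP
([2, 2, 3], [1, 2, 0])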
"""
return self.floordiv(other), self.mod(other)
def rdivmod(self, other: Any) -> Tuple["Series", "Series"]:
"""
Return Integer division and modulo of series and other, element-wise
(binary operator `rdivmod`).
Parameters
----------
other : Series or scalar value
Returns
-------
2-Tuple of Series
The result of the operation.
See Also
--------
Series.divmod
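Examples
--------
An illustrative sketch (doctests skipped), mirroring ``Series.divmod`` with the
operands reversed:
>>> s = ps.Series([2, 3, 4])
>>> quotient, remainder = s.rdivmod(9) # doctest: +SKIP
>>> quotient.to_list(), remainder.to_list() # doctest: +SKIP
([4, 3, 2], [1, 0, 1])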
"""
return self.rfloordiv(other), self.rmod(other)
def between(self, left: Any, right: Any, inclusive: bool = True) -> "Series":
"""
Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar or list-like
Left boundary.
right : scalar or list-like
Right boundary.
inclusive : bool, default True
Include boundaries.
Returns
-------
Series
Series representing whether each element is between left and
right (inclusive).
See Also
--------
Series.gt : Greater than of series and other.
Series.lt : Less than of series and other.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
Examples
--------
>>> s = ps.Series([2, 0, 4, 8, np.nan])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 False
dtype: bool
With `inclusive` set to ``False`` boundary values are excluded:
>>> s.between(1, 4, inclusive=False)
0 True
1 False
2 False
3 False
4 False
dtype: bool
`left` and `right` can be any scalar value:
>>> s = ps.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool
"""
if inclusive:
lmask = self >= left
rmask = self <= right
else:
lmask = self > left
rmask = self < right
return lmask & rmask
# TODO: arg should support Series
# TODO: NaN and None
def map(self, arg: Union[Dict, Callable]) -> "Series":
"""
Map values of Series according to input correspondence.
Used for substituting each value in a Series with another value,
that may be derived from a function, a ``dict``.
.. note:: make sure the size of the dictionary is not huge because it could
degrade performance or throw an OutOfMemoryError due to a huge
expression within Spark. Consider passing a function as an
alternative in this case.
Parameters
----------
arg : function or dict
Mapping correspondence.
Returns
-------
Series
Same index as caller.
See Also
--------
Series.apply : For applying more complex functions on a Series.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Notes
-----
When ``arg`` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``None``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``None``.
Examples
--------
>>> s = ps.Series(['cat', 'dog', None, 'rabbit'])
>>> s
0 cat
1 dog
2 None
3 rabbit
dtype: object
``map`` accepts a ``dict``. Values that are not found
in the ``dict`` are converted to ``None``, unless the dict has a default
value (e.g. ``defaultdict``):
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 None
3 None
dtype: object
It also accepts a function:
>>> def format(x) -> str:
... return 'I am a {}'.format(x)
>>> s.map(format)
0 I am a cat
1 I am a dog
2 I am a None
3 I am a rabbit
dtype: object
"""
if isinstance(arg, dict):
is_start = True
# In case dictionary is empty.
current = F.when(SF.lit(False), SF.lit(None).cast(self.spark.data_type))
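# Build a single chained CASE WHEN expression, one WHEN clause per dictionary entry;
# values with no matching key fall through to the OTHERWISE clause added below.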
for to_replace, value in arg.items():
if is_start:
current = F.when(self.spark.column == SF.lit(to_replace), value)
is_start = False
else:
current = current.when(self.spark.column == SF.lit(to_replace), value)
if hasattr(arg, "__missing__"):
tmp_val = arg[np._NoValue]
del arg[np._NoValue] # Remove in case it's set in defaultdict.
current = current.otherwise(SF.lit(tmp_val))
else:
current = current.otherwise(SF.lit(None).cast(self.spark.data_type))
return self._with_new_scol(current)
else:
return self.apply(arg)
@property
def shape(self) -> Tuple[int]:
"""Return a tuple of the shape of the underlying data."""
return (len(self),)
@property
def name(self) -> Name:
"""Return name of the Series."""
name = self._column_label
if name is not None and len(name) == 1:
return name[0]
else:
return name
@name.setter
def name(self, name: Name) -> None:
self.rename(name, inplace=True)
# TODO: Functionality and documentation should be matched. Currently, changing index labels
# taking dictionary and function to change index are not supported.
def rename(self, index: Optional[Name] = None, **kwargs: Any) -> "Series":
"""
Alter Series name.
Parameters
----------
index : scalar
Scalar will alter the ``Series.name`` attribute.
inplace : bool, default False
Whether to return a new Series. If True then value of copy is
ignored.
Returns
-------
Series
Series with name altered.
Examples
--------
>>> s = ps.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
"""
if index is None:
pass
elif not is_hashable(index):
raise TypeError("Series.name must be a hashable type")
elif not isinstance(index, tuple):
index = (index,)
name = name_like_string(index)
scol = self.spark.column.alias(name)
field = self._internal.data_fields[0].copy(name=name)
internal = self._internal.copy(
column_labels=[index],
data_spark_columns=[scol],
data_fields=[field],
column_label_names=None,
)
psdf = DataFrame(internal) # type: DataFrame
if kwargs.get("inplace", False):
self._col_label = index
self._update_anchor(psdf)
return self
else:
return first_series(psdf)
def rename_axis(
self, mapper: Optional[Any] = None, index: Optional[Any] = None, inplace: bool = False
) -> Optional["Series"]:
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper, index : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to the index values.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series.
Returns
-------
Series, or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Examples
--------
>>> s = ps.Series(["dog", "cat", "monkey"], name="animal")
>>> s # doctest: +NORMALIZE_WHITESPACE
0 dog
1 cat
2 monkey
Name: animal, dtype: object
>>> s.rename_axis("index").sort_index() # doctest: +NORMALIZE_WHITESPACE
index
0 dog
1 cat
2 monkey
Name: animal, dtype: object
**MultiIndex**
>>> index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> s = ps.Series([4, 4, 2], index=index, name='num_legs')
>>> s # doctest: +NORMALIZE_WHITESPACE
type name
mammal dog 4
cat 4
monkey 2
Name: num_legs, dtype: int64
>>> s.rename_axis(index={'type': 'class'}).sort_index() # doctest: +NORMALIZE_WHITESPACE
class name
mammal cat 4
dog 4
monkey 2
Name: num_legs, dtype: int64
>>> s.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE
TYPE NAME
mammal cat 4
dog 4
monkey 2
Name: num_legs, dtype: int64
"""
psdf = self.to_frame().rename_axis(mapper=mapper, index=index, inplace=False)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
@property
def index(self) -> "ps.Index":
"""The index (axis labels) Column of the Series.
See Also
--------
Index
"""
return self._psdf.index
@property
def is_unique(self) -> bool:
"""
Return boolean if values in the object are unique
Returns
-------
is_unique : boolean
>>> ps.Series([1, 2, 3]).is_unique
True
>>> ps.Series([1, 2, 2]).is_unique
False
>>> ps.Series([1, 2, 3, None]).is_unique
True
"""
scol = self.spark.column
# Here we check:
# 1. the distinct count of non-null values equals the count of non-null values,
#    i.e. the non-null values contain no duplicates;
# 2. there is at most one null value.
#
# This workaround calculates the distinct count including nulls in a
# single pass. Note that COUNT(DISTINCT expr) in Spark is designed to ignore nulls.
return self._internal.spark_frame.select(
(F.count(scol) == F.countDistinct(scol))
& (F.count(F.when(scol.isNull(), 1).otherwise(None)) <= 1)
).collect()[0][0]
def reset_index(
self,
level: Optional[Union[int, Name, Sequence[Union[int, Name]]]] = None,
drop: bool = False,
name: Optional[Name] = None,
inplace: bool = False,
) -> Optional[Union["Series", DataFrame]]:
"""
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column,
or when the index is meaningless and needs to be reset
to the default before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels from the index.
Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in the new DataFrame.
name : object, optional
The name to use for the column containing the original Series values.
Uses self.name by default. This argument is ignored when drop is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
Returns
-------
Series or DataFrame
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4], index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx 0
0 a 1
1 b 2
2 c 3
3 d 4
To specify the name of the new column use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
To generate a new Series with the default set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
dtype: int64
To update the Series in place, without generating a new one
set `inplace` to True. Note that it also requires ``drop=True``.
>>> s.reset_index(inplace=True, drop=True)
>>> s
0 1
1 2
2 3
3 4
dtype: int64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace and not drop:
raise TypeError("Cannot reset_index inplace on a Series to create a DataFrame")
if drop:
psdf = self._psdf[[self.name]]
else:
psser = self
if name is not None:
psser = psser.rename(name)
psdf = psser.to_frame()
psdf = psdf.reset_index(level=level, drop=drop)
if drop:
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
else:
return psdf
def to_frame(self, name: Optional[Name] = None) -> DataFrame:
"""
Convert Series to DataFrame.
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
DataFrame
DataFrame representation of Series.
Examples
--------
>>> s = ps.Series(["a", "b", "c"])
>>> s.to_frame()
0
0 a
1 b
2 c
>>> s = ps.Series(["a", "b", "c"], name="vals")
>>> s.to_frame()
vals
0 a
1 b
2 c
"""
if name is not None:
renamed = self.rename(name)
elif self._column_label is None:
renamed = self.rename(DEFAULT_SERIES_NAME)
else:
renamed = self
return DataFrame(renamed._internal)
to_dataframe = to_frame
def to_string(
self,
buf: Optional[IO[str]] = None,
na_rep: str = "NaN",
float_format: Optional[Callable[[float], str]] = None,
header: bool = True,
index: bool = True,
length: bool = False,
dtype: bool = False,
name: bool = False,
max_rows: Optional[int] = None,
) -> Optional[str]:
"""
Render a string representation of the Series.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
buffer to write to
na_rep : string, optional
string representation of NAN to use, default 'NaN'
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats
default None
header : boolean, default True
Add the Series header (index name)
index : bool, optional
Add index (row) labels, default True
length : boolean, default False
Add the Series length
dtype : boolean, default False
Add the Series dtype
name : boolean, default False
Add the Series name if not None
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
Returns
-------
formatted : string (if not buffer passed)
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
>>> print(df['dogs'].to_string())
0 0.2
1 0.0
2 0.6
3 0.2
>>> print(df['dogs'].to_string(max_rows=2))
0 0.2
1 0.0
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
psseries = self.head(max_rows)
else:
psseries = self
return validate_arguments_and_invoke_function(
psseries._to_internal_pandas(), self.to_string, pd.Series.to_string, args
)
def to_clipboard(self, excel: bool = True, sep: Optional[str] = None, **kwargs: Any) -> None:
# Docstring defined below by reusing DataFrame.to_clipboard's.
args = locals()
psseries = self
return validate_arguments_and_invoke_function(
psseries._to_internal_pandas(), self.to_clipboard, pd.Series.to_clipboard, args
)
to_clipboard.__doc__ = DataFrame.to_clipboard.__doc__
def to_dict(self, into: Type = dict) -> Mapping:
"""
Convert Series to {label -> value} dict or dict-like object.
.. note:: This method should only be used if the resulting dict is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
into : class, default dict
The collections.abc.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
collections.abc.Mapping
Key-value representation of Series.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s_dict = s.to_dict()
>>> sorted(s_dict.items())
[(0, 1), (1, 2), (2, 3), (3, 4)]
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd) # doctest: +ELLIPSIS
defaultdict(<class 'list'>, {...})
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psseries = self
return validate_arguments_and_invoke_function(
psseries._to_internal_pandas(), self.to_dict, pd.Series.to_dict, args
)
def to_latex(
self,
buf: Optional[IO[str]] = None,
columns: Optional[List[Name]] = None,
col_space: Optional[int] = None,
header: bool = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[
Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]
] = None,
float_format: Optional[Callable[[float], str]] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
bold_rows: bool = False,
column_format: Optional[str] = None,
longtable: Optional[bool] = None,
escape: Optional[bool] = None,
encoding: Optional[str] = None,
decimal: str = ".",
multicolumn: Optional[bool] = None,
multicolumn_format: Optional[str] = None,
multirow: Optional[bool] = None,
) -> Optional[str]:
args = locals()
psseries = self
return validate_arguments_and_invoke_function(
psseries._to_internal_pandas(), self.to_latex, pd.Series.to_latex, args
)
to_latex.__doc__ = DataFrame.to_latex.__doc__
def to_pandas(self) -> pd.Series:
"""
Return a pandas Series.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
>>> df['dogs'].to_pandas()
0 0.2
1 0.0
2 0.6
3 0.2
Name: dogs, dtype: float64
"""
return self._to_internal_pandas().copy()
def to_list(self) -> List:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
.. note:: This method should only be used if the resulting list is expected
to be small, as all the data is loaded into the driver's memory.
"""
return self._to_internal_pandas().tolist()
tolist = to_list
def drop_duplicates(self, keep: str = "first", inplace: bool = False) -> Optional["Series"]:
"""
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
Method to handle dropping duplicates:
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
Returns
-------
Series
Series with duplicates dropped.
Examples
--------
Generate a Series with duplicated entries.
>>> s = ps.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
... name='animal')
>>> s.sort_index()
0 lama
1 cow
2 lama
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
With the 'keep' parameter, the selection behaviour of duplicated values
can be changed. The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> s.drop_duplicates().sort_index()
0 lama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
>>> s.drop_duplicates(keep='last').sort_index()
1 cow
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
duplicated entries. Setting the value of 'inplace' to ``True`` performs
the operation inplace and returns ``None``.
>>> s.drop_duplicates(keep=False, inplace=True)
>>> s.sort_index()
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
psdf = self._psdf[[self.name]].drop_duplicates(keep=keep)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
def reindex(self, index: Optional[Any] = None, fill_value: Optional[Any] = None) -> "Series":
"""
Conform Series to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced.
Parameters
----------
index : array-like, optional
New labels / index to conform to, should be specified using keywords.
Preferably an Index object to avoid duplicating data
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
Returns
-------
Series with changed index.
See Also
--------
Series.reset_index : Remove row labels or move them to new columns.
Examples
--------
Create a series with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> ser = ps.Series([200, 200, 404, 404, 301],
... index=index, name='http_status')
>>> ser
Firefox 200
Chrome 200
Safari 404
IE10 404
Konqueror 301
Name: http_status, dtype: int64
Create a new index and reindex the Series. By default
values in the new index that do not have corresponding
records in the Series are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> ser.reindex(new_index).sort_index()
Chrome 200.0
Comodo Dragon NaN
IE10 404.0
Iceweasel NaN
Safari 404.0
Name: http_status, dtype: float64
We can fill in the missing values by passing a value to
the keyword ``fill_value``.
>>> ser.reindex(new_index, fill_value=0).sort_index()
Chrome 200
Comodo Dragon 0
IE10 404
Iceweasel 0
Safari 404
Name: http_status, dtype: int64
To further illustrate the filling functionality in
``reindex``, we will create a Series with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> ser2 = ps.Series([100, 101, np.nan, 100, 89, 88],
... name='prices', index=date_index)
>>> ser2.sort_index()
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Name: prices, dtype: float64
Suppose we decide to expand the series to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> ser2.reindex(date_index2).sort_index()
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Name: prices, dtype: float64
"""
return first_series(self.to_frame().reindex(index=index, fill_value=fill_value)).rename(
self.name
)
def reindex_like(self, other: Union["Series", "DataFrame"]) -> "Series":
"""
Return a Series with matching indices as other object.
Conform the object to the same index on all axes. Places NA/NaN in locations
having no value in the previous index.
Parameters
----------
other : Series or DataFrame
Its row and column indices are used to define the new indices
of this object.
Returns
-------
Series
Series with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, ...)``.
Examples
--------
>>> s1 = ps.Series([24.3, 31.0, 22.0, 35.0],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'),
... name="temp_celsius")
>>> s1
2014-02-12 24.3
2014-02-13 31.0
2014-02-14 22.0
2014-02-15 35.0
Name: temp_celsius, dtype: float64
>>> s2 = ps.Series(["low", "low", "medium"],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']),
... name="winspeed")
>>> s2
2014-02-12 low
2014-02-13 low
2014-02-15 medium
Name: winspeed, dtype: object
>>> s2.reindex_like(s1).sort_index()
2014-02-12 low
2014-02-13 low
2014-02-14 None
2014-02-15 medium
Name: winspeed, dtype: object
"""
if isinstance(other, (Series, DataFrame)):
return self.reindex(index=other.index)
else:
raise TypeError("other must be a pandas-on-Spark Series or DataFrame")
def fillna(
self,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool = False,
limit: Optional[int] = None,
) -> Optional["Series"]:
"""Fill NA/NaN values.
.. note:: the current implementation of the 'method' parameter in fillna uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series.
pad / ffill: propagate the last valid observation forward to the next valid one.
backfill / bfill: use the NEXT valid observation to fill the gap.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
Series
Series with NA entries filled.
Examples
--------
>>> s = ps.Series([np.nan, 2, 3, 4, np.nan, 6], name='x')
>>> s
0 NaN
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
Name: x, dtype: float64
Replace all NaN elements with 0s.
>>> s.fillna(0)
0 0.0
1 2.0
2 3.0
3 4.0
4 0.0
5 6.0
Name: x, dtype: float64
We can also propagate non-null values forward or backward.
>>> s.fillna(method='ffill')
0 NaN
1 2.0
2 3.0
3 4.0
4 4.0
5 6.0
Name: x, dtype: float64
>>> s = ps.Series([np.nan, 'a', 'b', 'c', np.nan], name='x')
>>> s.fillna(method='ffill')
0 None
1 a
2 b
3 c
4 c
Name: x, dtype: object
"""
psser = self._fillna(value=value, method=method, axis=axis, limit=limit)
if method is not None:
psser = DataFrame(psser._psdf._internal.resolved_copy)._psser_for(self._column_label)
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._psdf._update_internal_frame(psser._psdf._internal, requires_same_anchor=False)
return None
else:
return psser._with_new_scol(psser.spark.column) # TODO: dtype?
def _fillna(
self,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
limit: Optional[int] = None,
part_cols: Sequence["ColumnOrName"] = (),
) -> "Series":
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if (value is None) and (method is None):
raise ValueError("Must specify a fillna 'value' or 'method' parameter.")
if (method is not None) and (method not in ["ffill", "pad", "backfill", "bfill"]):
raise ValueError("Expecting 'pad', 'ffill', 'backfill' or 'bfill'.")
scol = self.spark.column
if isinstance(self.spark.data_type, (FloatType, DoubleType)):
cond = scol.isNull() | F.isnan(scol)
else:
if not self.spark.nullable:
return self.copy()
cond = scol.isNull()
if value is not None:
if not isinstance(value, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(value).__name__)
if limit is not None:
raise ValueError("limit parameter for value is not support now")
scol = F.when(cond, value).otherwise(scol)
else:
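# Method-based filling looks up the nearest non-null value through a window:
# ffill/pad takes the last non-null value over preceding rows, while
# bfill/backfill takes the first non-null value over following rows.
# When given, `limit` bounds how far back/ahead the window may reach.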
if method in ["ffill", "pad"]:
func = F.last
end = Window.currentRow - 1
if limit is not None:
begin = Window.currentRow - limit
else:
begin = Window.unboundedPreceding
elif method in ["bfill", "backfill"]:
func = F.first
begin = Window.currentRow + 1
if limit is not None:
end = Window.currentRow + limit
else:
end = Window.unboundedFollowing
window = (
Window.partitionBy(*part_cols)
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(begin, end)
)
scol = F.when(cond, func(scol, True).over(window)).otherwise(scol)
return DataFrame(
self._psdf._internal.with_new_spark_column(
self._column_label, scol.alias(name_like_string(self.name)) # TODO: dtype?
)
)._psser_for(self._column_label)
def dropna(self, axis: Axis = 0, inplace: bool = False, **kwargs: Any) -> Optional["Series"]:
"""
Return a new Series with missing values removed.
Parameters
----------
axis : {0 or 'index'}, default 0
There is only one axis to drop values from.
inplace : bool, default False
If True, do operation inplace and return None.
**kwargs
Not in use.
Returns
-------
Series
Series with NA entries dropped from it.
Examples
--------
>>> ser = ps.Series([1., 2., np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
dtype: float64
Keep the Series with valid entries in the same variable.
>>> ser.dropna(inplace=True)
>>> ser
0 1.0
1 2.0
dtype: float64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# TODO: last two examples from pandas produce different results.
psdf = self._psdf[[self.name]].dropna(axis=axis, inplace=False)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> "Series":
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
Series
Series with the values outside the clip boundaries replaced
Examples
--------
>>> ps.Series([0, 2, 4]).clip(1, 3)
0 1
1 2
2 3
dtype: int64
Notes
-----
One difference between this implementation and pandas is that running
`pd.Series(['a', 'b']).clip(0, 1)` will crash with "TypeError: '<=' not supported between
instances of 'str' and 'int'" while `ps.Series(['a', 'b']).clip(0, 1)` will output the
original Series, simply ignoring the incompatible types.
"""
if is_list_like(lower) or is_list_like(upper):
raise TypeError(
"List-like value are not supported for 'lower' and 'upper' at the " + "moment"
)
if lower is None and upper is None:
return self
if isinstance(self.spark.data_type, NumericType):
scol = self.spark.column
if lower is not None:
scol = F.when(scol < lower, lower).otherwise(scol)
if upper is not None:
scol = F.when(scol > upper, upper).otherwise(scol)
return self._with_new_scol(
scol.alias(self._internal.data_spark_column_names[0]),
field=self._internal.data_fields[0],
)
else:
return self
def drop(
self,
labels: Optional[Union[Name, List[Name]]] = None,
index: Optional[Union[Name, List[Name]]] = None,
level: Optional[int] = None,
) -> "Series":
"""
Return Series with specified index labels removed.
Remove elements of a Series based on specifying the index labels.
When using a multi-index, labels on different levels can be removed by specifying the level.
Parameters
----------
labels : single label or list-like
Index labels to drop.
index : None
Redundant for application on Series, but index can be used instead of labels.
level : int or level name, optional
For MultiIndex, level for which the labels will be removed.
Returns
-------
Series
Series with specified index labels removed.
See Also
--------
Series.dropna
Examples
--------
>>> s = ps.Series(data=np.arange(3), index=['A', 'B', 'C'])
>>> s
A 0
B 1
C 2
dtype: int64
Drop single label A
>>> s.drop('A')
B 1
C 2
dtype: int64
Drop labels B and C
>>> s.drop(labels=['B', 'C'])
A 0
dtype: int64
With 'index' rather than 'labels' returns exactly same result.
>>> s.drop(index='A')
B 1
C 2
dtype: int64
>>> s.drop(index=['B', 'C'])
A 0
dtype: int64
        Also supports MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.drop(labels='weight', level=1)
lama speed 45.0
length 1.2
cow speed 30.0
length 1.5
falcon speed 320.0
length 0.3
dtype: float64
>>> s.drop(('lama', 'weight'))
lama speed 45.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.drop([('lama', 'speed'), ('falcon', 'weight')])
lama weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
length 0.3
dtype: float64
"""
return first_series(self._drop(labels=labels, index=index, level=level))
def _drop(
self,
labels: Optional[Union[Name, List[Name]]] = None,
index: Optional[Union[Name, List[Name]]] = None,
level: Optional[int] = None,
) -> DataFrame:
if labels is not None:
if index is not None:
raise ValueError("Cannot specify both 'labels' and 'index'")
return self._drop(index=labels, level=level)
if index is not None:
internal = self._internal
if level is None:
level = 0
if level >= internal.index_level:
raise ValueError("'level' should be less than the number of indexes")
if is_name_like_tuple(index): # type: ignore
index_list = [cast(Label, index)]
elif is_name_like_value(index):
index_list = [(index,)]
elif all(is_name_like_value(idxes, allow_tuple=False) for idxes in index):
                index_list = [(idx,) for idx in index]
elif not all(is_name_like_tuple(idxes) for idxes in index):
raise ValueError(
"If the given index is a list, it "
"should only contains names as all tuples or all non tuples "
"that contain index names"
)
else:
index_list = cast(List[Label], index)
drop_index_scols = []
for idxes in index_list:
try:
index_scols = [
internal.index_spark_columns[lvl] == idx
for lvl, idx in enumerate(idxes, level)
]
except IndexError:
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
                            len(idxes), internal.index_level
)
)
drop_index_scols.append(reduce(lambda x, y: x & y, index_scols))
cond = ~reduce(lambda x, y: x | y, drop_index_scols)
return DataFrame(internal.with_filter(cond))
else:
raise ValueError("Need to specify at least one of 'labels' or 'index'")
def head(self, n: int = 5) -> "Series":
"""
Return the first n rows.
This function returns the first n rows for the object based on position.
It is useful for quickly testing if your object has the right type of data in it.
Parameters
----------
        n : int, default 5
Returns
-------
The first n rows of the caller object.
Examples
--------
>>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion']})
>>> df.animal.head(2) # doctest: +NORMALIZE_WHITESPACE
0 alligator
1 bee
Name: animal, dtype: object
"""
return first_series(self.to_frame().head(n)).rename(self.name)
def last(self, offset: Union[str, DateOffset]) -> "Series":
"""
Select final periods of time series data based on a date offset.
When having a Series with dates as index, this function can
select the last few elements based on a date offset.
Parameters
----------
offset : str or DateOffset
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the last 3 days.
Returns
-------
Series
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
Examples
--------
>>> index = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> psser = ps.Series([1, 2, 3, 4], index=index)
>>> psser
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
dtype: int64
Get the rows for the last 3 days:
>>> psser.last('3D')
2018-04-13 3
2018-04-15 4
dtype: int64
Notice the data for 3 last calendar days were returned, not the last
3 observed days in the dataset, and therefore data for 2018-04-11 was
not returned.
"""
return first_series(self.to_frame().last(offset)).rename(self.name)
def first(self, offset: Union[str, DateOffset]) -> "Series":
"""
Select first periods of time series data based on a date offset.
When having a Series with dates as index, this function can
select the first few elements based on a date offset.
Parameters
----------
offset : str or DateOffset
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the first 3 days.
Returns
-------
Series
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
Examples
--------
>>> index = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> psser = ps.Series([1, 2, 3, 4], index=index)
>>> psser
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
dtype: int64
Get the rows for the first 3 days:
>>> psser.first('3D')
2018-04-09 1
2018-04-11 2
dtype: int64
Notice the data for 3 first calendar days were returned, not the first
3 observed days in the dataset, and therefore data for 2018-04-13 was
not returned.
"""
return first_series(self.to_frame().first(offset)).rename(self.name)
# TODO: Categorical type isn't supported (due to PySpark's limitation) and
    # some doctests related to timestamps were not added.
def unique(self) -> "Series":
"""
Return unique values of Series object.
Uniques are returned in order of appearance. Hash table-based unique,
therefore does NOT sort.
        .. note:: This method returns a newly created Series whereas pandas returns
the unique values as a NumPy array.
Returns
-------
Returns the unique values as a Series.
See Also
--------
Index.unique
groupby.SeriesGroupBy.unique
Examples
--------
>>> psser = ps.Series([2, 1, 3, 3], name='A')
>>> psser.unique().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
<BLANKLINE>
... 1
... 2
... 3
Name: A, dtype: int64
>>> ps.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
0 2016-01-01
dtype: datetime64[ns]
>>> psser.name = ('x', 'a')
>>> psser.unique().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
<BLANKLINE>
... 1
... 2
... 3
Name: (x, a), dtype: int64
"""
sdf = self._internal.spark_frame.select(self.spark.column).distinct()
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=None,
column_labels=[self._column_label],
data_spark_columns=[scol_for(sdf, self._internal.data_spark_column_names[0])],
data_fields=[self._internal.data_fields[0]],
column_label_names=self._internal.column_label_names,
)
return first_series(DataFrame(internal))
def sort_values(
self, ascending: bool = True, inplace: bool = False, na_position: str = "last"
) -> Optional["Series"]:
"""
Sort by the values.
Sort a Series in ascending or descending order by some criterion.
Parameters
----------
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : Series ordered by values.
Examples
--------
>>> s = ps.Series([np.nan, 1, 3, 10, 5])
>>> s
0 NaN
1 1.0
2 3.0
3 10.0
4 5.0
dtype: float64
Sort values ascending order (default behaviour)
>>> s.sort_values(ascending=True)
1 1.0
2 3.0
4 5.0
3 10.0
0 NaN
dtype: float64
Sort values descending order
>>> s.sort_values(ascending=False)
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values inplace
>>> s.sort_values(ascending=False, inplace=True)
>>> s
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values putting NAs first
>>> s.sort_values(na_position='first')
0 NaN
1 1.0
2 3.0
4 5.0
3 10.0
dtype: float64
Sort a series of strings
>>> s = ps.Series(['z', 'b', 'd', 'a', 'c'])
>>> s
0 z
1 b
2 d
3 a
4 c
dtype: object
>>> s.sort_values()
3 a
1 b
4 c
2 d
0 z
dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
psdf = self._psdf[[self.name]]._sort(
by=[self.spark.column], ascending=ascending, na_position=na_position
)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
def sort_index(
self,
axis: Axis = 0,
level: Optional[Union[int, List[int]]] = None,
ascending: bool = True,
inplace: bool = False,
kind: str = None,
na_position: str = "last",
) -> Optional["Series"]:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
pandas-on-Spark does not allow specifying the sorting algorithm at the moment,
default None
        na_position : {'first', 'last'}, default 'last'
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : Series
Examples
--------
>>> df = ps.Series([2, 1, np.nan], index=['b', 'a', np.nan])
>>> df.sort_index()
a 1.0
b 2.0
NaN NaN
dtype: float64
>>> df.sort_index(ascending=False)
b 2.0
a 1.0
NaN NaN
dtype: float64
>>> df.sort_index(na_position='first')
NaN NaN
a 1.0
b 2.0
dtype: float64
>>> df.sort_index(inplace=True)
>>> df
a 1.0
b 2.0
NaN NaN
dtype: float64
>>> df = ps.Series(range(4), index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], name='0')
>>> df.sort_index()
a 0 3
1 2
b 0 1
1 0
Name: 0, dtype: int64
>>> df.sort_index(level=1) # doctest: +SKIP
a 0 3
b 0 1
a 1 2
b 1 0
Name: 0, dtype: int64
>>> df.sort_index(level=[1, 0])
a 0 3
b 0 1
a 1 2
b 1 0
Name: 0, dtype: int64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
psdf = self._psdf[[self.name]].sort_index(
axis=axis, level=level, ascending=ascending, kind=kind, na_position=na_position
)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
def swaplevel(
self, i: Union[int, Name] = -2, j: Union[int, Name] = -1, copy: bool = True
) -> "Series":
"""
Swap levels i and j in a MultiIndex.
Default is to swap the two innermost levels of the index.
Parameters
----------
i, j : int, str
Level of the indices to be swapped. Can pass level name as string.
copy : bool, default True
Whether to copy underlying data. Must be True.
Returns
-------
Series
Series with levels swapped in MultiIndex.
Examples
--------
>>> midx = pd.MultiIndex.from_arrays([['a', 'b'], [1, 2]], names = ['word', 'number'])
>>> midx # doctest: +SKIP
MultiIndex([('a', 1),
('b', 2)],
names=['word', 'number'])
>>> psser = ps.Series(['x', 'y'], index=midx)
>>> psser
word number
a 1 x
b 2 y
dtype: object
>>> psser.swaplevel()
number word
1 a x
2 b y
dtype: object
>>> psser.swaplevel(0, 1)
number word
1 a x
2 b y
dtype: object
>>> psser.swaplevel('number', 'word')
number word
1 a x
2 b y
dtype: object
"""
assert copy is True
return first_series(self.to_frame().swaplevel(i, j, axis=0)).rename(self.name)
def swapaxes(self, i: Axis, j: Axis, copy: bool = True) -> "Series":
"""
Interchange axes and swap values axes appropriately.
Parameters
----------
i: {0 or 'index', 1 or 'columns'}. The axis to swap.
j: {0 or 'index', 1 or 'columns'}. The axis to swap.
copy : bool, default True.
Returns
-------
Series
Examples
--------
>>> psser = ps.Series([1, 2, 3], index=["x", "y", "z"])
>>> psser
x 1
y 2
z 3
dtype: int64
>>>
>>> psser.swapaxes(0, 0)
x 1
y 2
z 3
dtype: int64
"""
assert copy is True
i = validate_axis(i)
j = validate_axis(j)
if not i == j == 0:
raise ValueError("Axis must be 0 for Series")
return self.copy()
def add_prefix(self, prefix: str) -> "Series":
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series
New Series with updated labels.
See Also
--------
Series.add_suffix: Suffix column labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
"""
assert isinstance(prefix, str)
internal = self._internal.resolved_copy
sdf = internal.spark_frame.select(
[
F.concat(SF.lit(prefix), index_spark_column).alias(index_spark_column_name)
for index_spark_column, index_spark_column_name in zip(
internal.index_spark_columns, internal.index_spark_column_names
)
]
+ internal.data_spark_columns
)
return first_series(
DataFrame(internal.with_new_sdf(sdf, index_fields=([None] * internal.index_level)))
)
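    # Illustrative sketch (not part of the original source): the prefixing
    # above is a plain string concatenation applied to every index column.
    # With a hypothetical frame and column names:
    #
    #   from pyspark.sql import SparkSession, functions as F
    #
    #   spark = SparkSession.builder.getOrCreate()
    #   sdf = spark.createDataFrame([(0, 1), (1, 2)], ["idx", "val"])
    #   # Cast the index to string and prepend the prefix literal.
    #   sdf.select(
    #       F.concat(F.lit("item_"), F.col("idx").cast("string")).alias("idx"),
    #       "val",
    #   ).show()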
def add_suffix(self, suffix: str) -> "Series":
"""
Suffix labels with string suffix.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series
New Series with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
"""
assert isinstance(suffix, str)
internal = self._internal.resolved_copy
sdf = internal.spark_frame.select(
[
F.concat(index_spark_column, SF.lit(suffix)).alias(index_spark_column_name)
for index_spark_column, index_spark_column_name in zip(
internal.index_spark_columns, internal.index_spark_column_names
)
]
+ internal.data_spark_columns
)
return first_series(
DataFrame(internal.with_new_sdf(sdf, index_fields=([None] * internal.index_level)))
)
def corr(self, other: "Series", method: str = "pearson") -> float:
"""
Compute correlation with `other` Series, excluding missing values.
Parameters
----------
other : Series
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
correlation : float
Examples
--------
>>> df = ps.DataFrame({'s1': [.2, .0, .6, .2],
... 's2': [.3, .6, .0, .1]})
>>> s1 = df.s1
>>> s2 = df.s2
>>> s1.corr(s2, method='pearson') # doctest: +ELLIPSIS
-0.851064...
>>> s1.corr(s2, method='spearman') # doctest: +ELLIPSIS
-0.948683...
Notes
-----
There are behavior differences between pandas-on-Spark and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. pandas-on-Spark will return an error.
* pandas-on-Spark doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
# This implementation is suboptimal because it computes more than necessary,
# but it should be a start
columns = ["__corr_arg1__", "__corr_arg2__"]
psdf = self._psdf.assign(__corr_arg1__=self, __corr_arg2__=other)[columns]
psdf.columns = columns
c = corr(psdf, method=method)
return c.loc[tuple(columns)]
def nsmallest(self, n: int = 5) -> "Series":
"""
Return the smallest `n` elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
See Also
--------
Series.nlargest: Get the `n` largest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,
        the two would have the same performance.
Examples
--------
>>> data = [1, 2, 3, 4, np.nan ,6, 7, 8]
>>> s = ps.Series(data)
>>> s
0 1.0
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
6 7.0
7 8.0
dtype: float64
        The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
0 1.0
1 2.0
2 3.0
3 4.0
5 6.0
dtype: float64
>>> s.nsmallest(3)
0 1.0
1 2.0
2 3.0
dtype: float64
"""
return self.sort_values(ascending=True).head(n)
def nlargest(self, n: int = 5) -> "Series":
"""
Return the largest `n` elements.
Parameters
----------
n : int, default 5
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,
        the two would have the same performance.
Examples
--------
>>> data = [1, 2, 3, 4, np.nan ,6, 7, 8]
>>> s = ps.Series(data)
>>> s
0 1.0
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
6 7.0
7 8.0
dtype: float64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
7 8.0
6 7.0
5 6.0
3 4.0
2 3.0
dtype: float64
>>> s.nlargest(n=3)
7 8.0
6 7.0
5 6.0
dtype: float64
"""
return self.sort_values(ascending=False).head(n)
def append(
self, to_append: "Series", ignore_index: bool = False, verify_integrity: bool = False
) -> "Series":
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise Exception on creating index with duplicates
Returns
-------
appended : Series
Examples
--------
>>> s1 = ps.Series([1, 2, 3])
>>> s2 = ps.Series([4, 5, 6])
>>> s3 = ps.Series([4, 5, 6], index=[3,4,5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With ignore_index set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
"""
return first_series(
self.to_frame().append(to_append.to_frame(), ignore_index, verify_integrity)
).rename(self.name)
def sample(
self,
n: Optional[int] = None,
frac: Optional[float] = None,
replace: bool = False,
random_state: Optional[int] = None,
) -> "Series":
return first_series(
self.to_frame().sample(n=n, frac=frac, replace=replace, random_state=random_state)
).rename(self.name)
sample.__doc__ = DataFrame.sample.__doc__
@no_type_check
def hist(self, bins=10, **kwds):
return self.plot.hist(bins, **kwds)
hist.__doc__ = PandasOnSparkPlotAccessor.hist.__doc__
def apply(self, func: Callable, args: Sequence[Any] = (), **kwds: Any) -> "Series":
"""
Invoke function on values of Series.
Can be a Python function that only works on the Series.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> np.int32:
... return x ** 2
        pandas-on-Spark uses the return type hint and does not try to infer the type.
Parameters
----------
func : function
            Python function to apply. A return type hint is recommended; without it,
            the type is inferred (see the note above).
args : tuple
Positional arguments passed to func after the series value.
**kwds
Additional keyword arguments passed to func.
Returns
-------
Series
See Also
--------
Series.aggregate : Only perform aggregating type operations.
Series.transform : Only perform transforming type operations.
DataFrame.apply : The equivalent function for DataFrame.
Examples
--------
Create a Series with typical summer temperatures for each city.
>>> s = ps.Series([20, 21, 12],
... index=['London', 'New York', 'Helsinki'])
>>> s
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x) -> np.int64:
... return x ** 2
>>> s.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword
>>> def subtract_custom_value(x, custom_value) -> np.int64:
... return x - custom_value
>>> s.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``
>>> def add_custom_values(x, **kwargs) -> np.int64:
... for month in kwargs:
... x += kwargs[month]
... return x
>>> s.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library
>>> def numpy_log(col) -> np.float64:
... return np.log(col)
>>> s.apply(numpy_log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
You can omit the type hint and let pandas-on-Spark infer its type.
>>> s.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
assert callable(func), "the first argument should be a callable function."
try:
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
except TypeError:
# Falls back to schema inference if it fails to get signature.
should_infer_schema = True
apply_each = wraps(func)(lambda s: s.apply(func, args=args, **kwds))
if should_infer_schema:
return self.pandas_on_spark._transform_batch(apply_each, None)
else:
sig_return = infer_return_type(func)
if not isinstance(sig_return, ScalarType):
raise ValueError(
"Expected the return type of this function to be of scalar type, "
"but found type {}".format(sig_return)
)
return_type = cast(ScalarType, sig_return)
return self.pandas_on_spark._transform_batch(apply_each, return_type)
    # TODO: not all arguments are implemented compared to pandas' for now.
def aggregate(self, func: Union[str, List[str]]) -> Union[Scalar, "Series"]:
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func : str or a list of str
function name(s) as string apply to series.
Returns
-------
scalar, Series
The return can be:
- scalar : when Series.agg is called with single function
- Series : when Series.agg is called with several functions
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
Series.apply : Invoke function on a Series.
Series.transform : Only perform transforming type operations.
Series.groupby : Perform operations over groups.
DataFrame.aggregate : The equivalent function for DataFrame.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s.agg('min')
1
>>> s.agg(['min', 'max']).sort_index()
max 4
min 1
dtype: int64
"""
if isinstance(func, list):
return first_series(self.to_frame().aggregate(func)).rename(self.name)
elif isinstance(func, str):
return getattr(self, func)()
else:
raise TypeError("func must be a string or list of strings")
agg = aggregate
def transpose(self, *args: Any, **kwargs: Any) -> "Series":
"""
Return the transpose, which is by definition self.
Examples
--------
It returns the same object as the transpose of the given series object, which is by
definition self.
>>> s = ps.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.transpose()
0 1
1 2
2 3
dtype: int64
"""
return self.copy()
T = property(transpose)
def transform(
self, func: Union[Callable, List[Callable]], axis: Axis = 0, *args: Any, **kwargs: Any
) -> Union["Series", DataFrame]:
"""
Call ``func`` producing the same type as `self` with transformed values
and that has the same axis length as input.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> np.int32:
... return x ** 2
        pandas-on-Spark uses the return type hint and does not try to infer the type.
Parameters
----------
func : function or list
A function or a list of functions to use for transforming the data.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
        An instance of the same type as `self` that must have the same length as the input.
See Also
--------
Series.aggregate : Only perform aggregating type operations.
Series.apply : Invoke function on Series.
DataFrame.transform : The equivalent function for DataFrame.
Examples
--------
>>> s = ps.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> def sqrt(x) -> float:
... return np.sqrt(x)
>>> s.transform(sqrt)
0 0.000000
1 1.000000
2 1.414214
dtype: float64
Even though the resulting instance must have the same length as the
input, it is possible to provide several input functions:
>>> def exp(x) -> float:
... return np.exp(x)
>>> s.transform([sqrt, exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
You can omit the type hint and let pandas-on-Spark infer its type.
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
if isinstance(func, list):
applied = []
for f in func:
applied.append(self.apply(f, args=args, **kwargs).rename(f.__name__))
internal = self._internal.with_new_columns(applied)
return DataFrame(internal)
else:
return self.apply(func, args=args, **kwargs)
def round(self, decimals: int = 0) -> "Series":
"""
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Series object
See Also
--------
DataFrame.round
Examples
--------
>>> df = ps.Series([0.028208, 0.038683, 0.877076], name='x')
>>> df
0 0.028208
1 0.038683
2 0.877076
Name: x, dtype: float64
>>> df.round(2)
0 0.03
1 0.04
2 0.88
Name: x, dtype: float64
"""
if not isinstance(decimals, int):
raise TypeError("decimals must be an integer")
scol = F.round(self.spark.column, decimals)
return self._with_new_scol(scol) # TODO: dtype?
# TODO: add 'interpolation' parameter.
def quantile(
self, q: Union[float, Iterable[float]] = 0.5, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return value at the given quantile.
.. note:: Unlike pandas', the quantile in pandas-on-Spark is an approximated quantile
based upon approximate percentile computation because computing quantile across
a large dataset is extremely expensive.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
float or Series
If the current object is a Series and ``q`` is an array, a Series will be
returned where the index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4, 5])
>>> s.quantile(.5)
3.0
>>> (s + 1).quantile(.5)
4.0
>>> s.quantile([.25, .5, .75])
0.25 2.0
0.50 3.0
0.75 4.0
dtype: float64
>>> (s + 1).quantile([.25, .5, .75])
0.25 3.0
0.50 4.0
0.75 5.0
dtype: float64
"""
if isinstance(q, Iterable):
return first_series(
self.to_frame().quantile(q=q, axis=0, numeric_only=False, accuracy=accuracy)
).rename(self.name)
else:
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
if not isinstance(q, float):
raise TypeError(
"q must be a float or an array of floats; however, [%s] found." % type(q)
)
q_float = cast(float, q)
if q_float < 0.0 or q_float > 1.0:
raise ValueError("percentiles should all be in the interval [0, 1].")
def quantile(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), q_float, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(quantile, name="quantile")
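    # Illustrative sketch (not part of the original source): the approximate
    # quantile above is backed by Spark's percentile_approx aggregate, where
    # the relative error is roughly 1.0 / accuracy. A standalone equivalent
    # with hypothetical names:
    #
    #   from pyspark.sql import SparkSession, functions as F
    #   from pyspark.sql.types import DoubleType
    #
    #   spark = SparkSession.builder.getOrCreate()
    #   sdf = spark.createDataFrame([(v,) for v in [1, 2, 3, 4, 5]], ["val"])
    #   sdf.select(
    #       F.percentile_approx(F.col("val").cast(DoubleType()), 0.5, 10000)
    #   ).show()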
# TODO: add axis, numeric_only, pct, na_option parameter
def rank(self, method: str = "average", ascending: bool = True) -> "Series":
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
.. note:: the current implementation of rank uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
ranks : same type as caller
Examples
--------
>>> s = ps.Series([1, 2, 2, 3], name='A')
>>> s
0 1
1 2
2 2
3 3
Name: A, dtype: int64
>>> s.rank()
0 1.0
1 2.5
2 2.5
3 4.0
Name: A, dtype: float64
        If method is set to 'min', it uses the lowest rank in the group.
>>> s.rank(method='min')
0 1.0
1 2.0
2 2.0
3 4.0
Name: A, dtype: float64
        If method is set to 'max', it uses the highest rank in the group.
>>> s.rank(method='max')
0 1.0
1 3.0
2 3.0
3 4.0
Name: A, dtype: float64
        If method is set to 'first', ranks are assigned in the order the values appear, without grouping.
>>> s.rank(method='first')
0 1.0
1 2.0
2 3.0
3 4.0
Name: A, dtype: float64
        If method is set to 'dense', it leaves no gaps between group ranks.
>>> s.rank(method='dense')
0 1.0
1 2.0
2 2.0
3 3.0
Name: A, dtype: float64
"""
return self._rank(method, ascending).spark.analyzed
def _rank(
self,
method: str = "average",
ascending: bool = True,
*,
part_cols: Sequence["ColumnOrName"] = ()
) -> "Series":
if method not in ["average", "min", "max", "first", "dense"]:
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
raise ValueError(msg)
if self._internal.index_level > 1:
raise ValueError("rank do not support index now")
if ascending:
asc_func = lambda scol: scol.asc()
else:
asc_func = lambda scol: scol.desc()
if method == "first":
window = (
Window.orderBy(
asc_func(self.spark.column),
asc_func(F.col(NATURAL_ORDER_COLUMN_NAME)),
)
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
scol = F.row_number().over(window)
elif method == "dense":
window = (
Window.orderBy(asc_func(self.spark.column))
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
scol = F.dense_rank().over(window)
else:
if method == "average":
stat_func = F.mean
elif method == "min":
stat_func = F.min
elif method == "max":
stat_func = F.max
window1 = (
Window.orderBy(asc_func(self.spark.column))
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
window2 = Window.partitionBy([self.spark.column] + list(part_cols)).rowsBetween(
Window.unboundedPreceding, Window.unboundedFollowing
)
scol = stat_func(F.row_number().over(window1)).over(window2)
psser = self._with_new_scol(scol)
return psser.astype(np.float64)
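    # Illustrative sketch (not part of the original source): for
    # method="average" the logic above numbers the rows in sort order and then
    # averages those row numbers within each group of equal values. Written
    # with an intermediate column and hypothetical names:
    #
    #   from pyspark.sql import SparkSession, Window, functions as F
    #
    #   spark = SparkSession.builder.getOrCreate()
    #   sdf = spark.createDataFrame([(1,), (2,), (2,), (3,)], ["val"])
    #   w1 = Window.orderBy(F.col("val").asc()).rowsBetween(
    #       Window.unboundedPreceding, Window.currentRow)
    #   w2 = Window.partitionBy("val")
    #   numbered = sdf.withColumn("rn", F.row_number().over(w1))
    #   numbered.withColumn("rank", F.mean("rn").over(w2)).show()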
def filter(
self,
items: Optional[Sequence[Any]] = None,
like: Optional[str] = None,
regex: Optional[str] = None,
axis: Optional[Axis] = None,
) -> "Series":
axis = validate_axis(axis)
if axis == 1:
raise ValueError("Series does not support columns axis.")
return first_series(
self.to_frame().filter(items=items, like=like, regex=regex, axis=axis)
).rename(self.name)
filter.__doc__ = DataFrame.filter.__doc__
def describe(self, percentiles: Optional[List[float]] = None) -> "Series":
return first_series(self.to_frame().describe(percentiles)).rename(self.name)
describe.__doc__ = DataFrame.describe.__doc__
def diff(self, periods: int = 1) -> "Series":
"""
First discrete difference of element.
        Calculates the difference of a Series element compared with another element in the
        Series (default is the element in the previous row).
.. note:: the current implementation of diff uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
Returns
-------
diffed : Series
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.b.diff()
0 NaN
1 0.0
2 1.0
3 1.0
4 2.0
5 3.0
Name: b, dtype: float64
        Difference with 3rd previous value
>>> df.c.diff(periods=3)
0 NaN
1 NaN
2 NaN
3 15.0
4 21.0
5 27.0
Name: c, dtype: float64
Difference with following value
>>> df.c.diff(periods=-1)
0 -3.0
1 -5.0
2 -7.0
3 -9.0
4 -11.0
5 NaN
Name: c, dtype: float64
"""
return self._diff(periods).spark.analyzed
def _diff(self, periods: int, *, part_cols: Sequence["ColumnOrName"] = ()) -> "Series":
if not isinstance(periods, int):
raise TypeError("periods should be an int; however, got [%s]" % type(periods).__name__)
window = (
Window.partitionBy(*part_cols)
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(-periods, -periods)
)
scol = self.spark.column - F.lag(self.spark.column, periods).over(window)
return self._with_new_scol(scol, field=self._internal.data_fields[0].copy(nullable=True))
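    # Illustrative sketch (not part of the original source): the difference
    # above is "current value minus the value `periods` rows back" over the
    # natural row order. With periods=1 and hypothetical column names:
    #
    #   from pyspark.sql import SparkSession, Window, functions as F
    #
    #   spark = SparkSession.builder.getOrCreate()
    #   sdf = spark.createDataFrame([(0, 1), (1, 1), (2, 2), (3, 3)], ["order", "b"])
    #   w = Window.orderBy("order")
    #   sdf.withColumn("b_diff", F.col("b") - F.lag("b", 1).over(w)).show()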
def idxmax(self, skipna: bool = True) -> Union[Tuple, Any]:
"""
Return the row label of the maximum value.
If multiple values equal the maximum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
Returns
-------
Index
Label of the maximum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
Series.idxmin : Return index *label* of the first occurrence
of minimum of values.
Examples
--------
>>> s = ps.Series(data=[1, None, 4, 3, 5],
... index=['A', 'B', 'C', 'D', 'E'])
>>> s
A 1.0
B NaN
C 4.0
D 3.0
E 5.0
dtype: float64
>>> s.idxmax()
'E'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmax(skipna=False)
nan
In case of multi-index, you get a tuple:
>>> index = pd.MultiIndex.from_arrays([
... ['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second'))
>>> s = ps.Series(data=[1, None, 4, 5], index=index)
>>> s
first second
a c 1.0
d NaN
b e 4.0
f 5.0
dtype: float64
>>> s.idxmax()
('b', 'f')
If multiple values equal the maximum, the first row label with that
value is returned.
>>> s = ps.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
>>> s
10 1
3 100
5 1
2 100
1 1
8 100
dtype: int64
>>> s.idxmax()
3
"""
sdf = self._internal.spark_frame
scol = self.spark.column
index_scols = self._internal.index_spark_columns
# desc_nulls_(last|first) is used via Py4J directly because
# it's not supported in Spark 2.3.
if skipna:
sdf = sdf.orderBy(Column(scol._jc.desc_nulls_last()), NATURAL_ORDER_COLUMN_NAME)
else:
sdf = sdf.orderBy(Column(scol._jc.desc_nulls_first()), NATURAL_ORDER_COLUMN_NAME)
results = sdf.select([scol] + index_scols).take(1)
if len(results) == 0:
raise ValueError("attempt to get idxmin of an empty sequence")
if results[0][0] is None:
            # This can only happen when skipna is False because nulls are
            # placed first in that case.
return np.nan
values = list(results[0][1:])
if len(values) == 1:
return values[0]
else:
return tuple(values)
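    # Illustrative sketch (not part of the original source): on Spark versions
    # where Column.desc_nulls_last is exposed in the Python API, the same
    # lookup can be written without going through Py4J. Hypothetical names:
    #
    #   from pyspark.sql import SparkSession, functions as F
    #
    #   spark = SparkSession.builder.getOrCreate()
    #   sdf = spark.createDataFrame(
    #       [("A", 1.0), ("B", None), ("C", 5.0)], ["idx", "val"])
    #   # Sort by value descending with nulls last, then take the first label.
    #   sdf.orderBy(F.col("val").desc_nulls_last()).select("idx").take(1)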
def idxmin(self, skipna: bool = True) -> Union[Tuple, Any]:
"""
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = ps.Series(data=[1, None, 4, 0],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 0.0
dtype: float64
>>> s.idxmin()
'D'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
In case of multi-index, you get a tuple:
>>> index = pd.MultiIndex.from_arrays([
... ['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second'))
>>> s = ps.Series(data=[1, None, 4, 0], index=index)
>>> s
first second
a c 1.0
d NaN
b e 4.0
f 0.0
dtype: float64
>>> s.idxmin()
('b', 'f')
If multiple values equal the minimum, the first row label with that
value is returned.
>>> s = ps.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
>>> s
10 1
3 100
5 1
2 100
1 1
8 100
dtype: int64
>>> s.idxmin()
10
"""
sdf = self._internal.spark_frame
scol = self.spark.column
index_scols = self._internal.index_spark_columns
        # asc_nulls_(last|first) is used via Py4J directly because
# it's not supported in Spark 2.3.
if skipna:
sdf = sdf.orderBy(Column(scol._jc.asc_nulls_last()), NATURAL_ORDER_COLUMN_NAME)
else:
sdf = sdf.orderBy(Column(scol._jc.asc_nulls_first()), NATURAL_ORDER_COLUMN_NAME)
results = sdf.select([scol] + index_scols).take(1)
if len(results) == 0:
raise ValueError("attempt to get idxmin of an empty sequence")
if results[0][0] is None:
            # This can only happen when skipna is False because nulls are
            # placed first in that case.
return np.nan
values = list(results[0][1:])
if len(values) == 1:
return values[0]
else:
return tuple(values)
def pop(self, item: Name) -> Union["Series", Scalar]:
"""
Return item and drop from series.
Parameters
----------
item : label
Label of index to be popped.
Returns
-------
Value that is popped from series.
Examples
--------
>>> s = ps.Series(data=np.arange(3), index=['A', 'B', 'C'])
>>> s
A 0
B 1
C 2
dtype: int64
>>> s.pop('A')
0
>>> s
B 1
C 2
dtype: int64
>>> s = ps.Series(data=np.arange(3), index=['A', 'A', 'C'])
>>> s
A 0
A 1
C 2
dtype: int64
>>> s.pop('A')
A 0
A 1
dtype: int64
>>> s
C 2
dtype: int64
        Also supports MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.pop('lama')
speed 45.0
weight 200.0
length 1.2
dtype: float64
>>> s
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
        Also supports MultiIndex with several index levels.
>>> midx = pd.MultiIndex([['a', 'b', 'c'],
... ['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 0, 0, 0, 1, 1, 1],
... [0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 0, 2]]
... )
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
a lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
b falcon speed 320.0
speed 1.0
length 0.3
dtype: float64
>>> s.pop(('a', 'lama'))
speed 45.0
weight 200.0
length 1.2
dtype: float64
>>> s
a cow speed 30.0
weight 250.0
length 1.5
b falcon speed 320.0
speed 1.0
length 0.3
dtype: float64
>>> s.pop(('b', 'falcon', 'speed'))
(b, falcon, speed) 320.0
(b, falcon, speed) 1.0
dtype: float64
"""
if not is_name_like_value(item):
raise TypeError("'key' should be string or tuple that contains strings")
if not is_name_like_tuple(item):
item = (item,)
if self._internal.index_level < len(item):
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
len(item), self._internal.index_level
)
)
internal = self._internal
scols = internal.index_spark_columns[len(item) :] + [self.spark.column]
rows = [internal.spark_columns[level] == index for level, index in enumerate(item)]
sdf = internal.spark_frame.filter(reduce(lambda x, y: x & y, rows)).select(scols)
psdf = self._drop(item)
self._update_anchor(psdf)
if self._internal.index_level == len(item):
            # if the key covers the full index and only a single row matches,
            # return the scalar value without a frame
pdf = sdf.limit(2).toPandas()
length = len(pdf)
if length == 1:
return pdf[internal.data_spark_column_names[0]].iloc[0]
item_string = name_like_string(item)
sdf = sdf.withColumn(SPARK_DEFAULT_INDEX_NAME, SF.lit(str(item_string)))
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
column_labels=[self._column_label],
data_fields=[self._internal.data_fields[0]],
)
return first_series(DataFrame(internal))
else:
internal = internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in internal.index_spark_column_names[len(item) :]
],
index_fields=internal.index_fields[len(item) :],
index_names=self._internal.index_names[len(item) :],
data_spark_columns=[scol_for(sdf, internal.data_spark_column_names[0])],
)
return first_series(DataFrame(internal))
def copy(self, deep: bool = True) -> "Series":
"""
Make a copy of this object's indices and data.
Parameters
----------
deep : bool, default True
            this parameter is not supported; it is just a dummy parameter to match pandas.
Returns
-------
copy : Series
Examples
--------
>>> s = ps.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
"""
return self._psdf.copy(deep=deep)._psser_for(self._column_label)
def mode(self, dropna: bool = True) -> "Series":
"""
Return the mode(s) of the dataset.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
Returns
-------
Series
Modes of the Series.
Examples
--------
>>> s = ps.Series([0, 0, 1, 1, 1, np.nan, np.nan, np.nan])
>>> s
0 0.0
1 0.0
2 1.0
3 1.0
4 1.0
5 NaN
6 NaN
7 NaN
dtype: float64
>>> s.mode()
0 1.0
dtype: float64
        If multiple values tie for the mode, all of them are shown
>>> s = ps.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
... np.nan, np.nan, np.nan])
>>> s
0 0.0
1 0.0
2 1.0
3 1.0
4 1.0
5 2.0
6 2.0
7 2.0
8 3.0
9 3.0
10 3.0
11 NaN
12 NaN
13 NaN
dtype: float64
>>> s.mode().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
<BLANKLINE>
... 1.0
... 2.0
... 3.0
dtype: float64
With 'dropna' set to 'False', we can also see NaN in the result
>>> s.mode(False).sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
<BLANKLINE>
... 1.0
... 2.0
... 3.0
... NaN
dtype: float64
"""
ser_count = self.value_counts(dropna=dropna, sort=False)
sdf_count = ser_count._internal.spark_frame
most_value = ser_count.max()
sdf_most_value = sdf_count.filter("count == {}".format(most_value))
sdf = sdf_most_value.select(
F.col(SPARK_DEFAULT_INDEX_NAME).alias(SPARK_DEFAULT_SERIES_NAME)
)
internal = InternalFrame(spark_frame=sdf, index_spark_columns=None, column_labels=[None])
return first_series(DataFrame(internal))
def keys(self) -> "ps.Index":
"""
Return alias for index.
Returns
-------
Index
Index of the Series.
Examples
--------
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> psser = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
>>> psser.keys() # doctest: +SKIP
MultiIndex([( 'lama', 'speed'),
( 'lama', 'weight'),
( 'lama', 'length'),
( 'cow', 'speed'),
( 'cow', 'weight'),
( 'cow', 'length'),
('falcon', 'speed'),
('falcon', 'weight'),
('falcon', 'length')],
)
"""
return self.index
# TODO: 'regex', 'method' parameter
def replace(
self,
to_replace: Optional[Union[Any, List, Tuple, Dict]] = None,
value: Optional[Union[List, Tuple]] = None,
regex: bool = False,
) -> "Series":
"""
Replace values given in to_replace with value.
Values of the Series are replaced with other values dynamically.
Parameters
----------
to_replace : str, list, tuple, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str:
- numeric: numeric values equal to to_replace will be replaced with value
- str: string exactly matching to_replace will be replaced with value
* list of str or numeric:
- if to_replace and value are both lists or tuples, they must be the same length.
- str and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values for different
existing values.
For example, {'a': 'b', 'y': 'z'} replaces the value ‘a’ with ‘b’ and ‘y’
with ‘z’. To use a dict in this way the value parameter should be None.
- For a DataFrame a dict can specify that different values should be replaced
in different columns. For example, {'a': 1, 'b': 'z'} looks for the value 1
in column ‘a’ and the value ‘z’ in column ‘b’ and replaces these values with
whatever is specified in value.
The value parameter should not be None in this case.
You can treat this as a special case of passing two lists except that you are
specifying the column to search in.
See the examples section for examples of each of these.
value : scalar, dict, list, tuple, str default None
Value to replace any values matching to_replace with.
For a DataFrame a dict of values can be used to specify which value to use
for each column (columns not in the dict will not be filled).
Regular expressions, strings and lists or dicts of such objects are also allowed.
Returns
-------
Series
Object after replacement.
Examples
--------
Scalar `to_replace` and `value`
>>> s = ps.Series([0, 1, 2, 3, 4])
>>> s
0 0
1 1
2 2
3 3
4 4
dtype: int64
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
List-like `to_replace`
>>> s.replace([0, 4], 5000)
0 5000
1 1
2 2
3 3
4 5000
dtype: int64
>>> s.replace([1, 2, 3], [10, 20, 30])
0 0
1 10
2 20
3 30
4 4
dtype: int64
Dict-like `to_replace`
>>> s.replace({1: 1000, 2: 2000, 3: 3000, 4: 4000})
0 0
1 1000
2 2000
3 3000
4 4000
dtype: int64
        Also supports MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.replace(45, 450)
lama speed 450.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.replace([45, 30, 320], 500)
lama speed 500.0
weight 200.0
length 1.2
cow speed 500.0
weight 250.0
length 1.5
falcon speed 500.0
weight 1.0
length 0.3
dtype: float64
>>> s.replace({45: 450, 30: 300})
lama speed 450.0
weight 200.0
length 1.2
cow speed 300.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
"""
if to_replace is None:
return self.fillna(method="ffill")
if not isinstance(to_replace, (str, list, tuple, dict, int, float)):
raise TypeError("'to_replace' should be one of str, list, tuple, dict, int, float")
if regex:
raise NotImplementedError("replace currently not support for regex")
to_replace = list(to_replace) if isinstance(to_replace, tuple) else to_replace
value = list(value) if isinstance(value, tuple) else value
if isinstance(to_replace, list) and isinstance(value, list):
if not len(to_replace) == len(value):
raise ValueError(
"Replacement lists must match in length. Expecting {} got {}".format(
len(to_replace), len(value)
)
)
to_replace = {k: v for k, v in zip(to_replace, value)}
if isinstance(to_replace, dict):
is_start = True
if len(to_replace) == 0:
current = self.spark.column
else:
for to_replace_, value in to_replace.items():
cond = (
(F.isnan(self.spark.column) | self.spark.column.isNull())
if pd.isna(to_replace_)
else (self.spark.column == SF.lit(to_replace_))
)
if is_start:
current = F.when(cond, value)
is_start = False
else:
current = current.when(cond, value)
current = current.otherwise(self.spark.column)
else:
cond = self.spark.column.isin(to_replace)
# to_replace may be a scalar
if np.array(pd.isna(to_replace)).any():
cond = cond | F.isnan(self.spark.column) | self.spark.column.isNull()
current = F.when(cond, value).otherwise(self.spark.column)
return self._with_new_scol(current) # TODO: dtype?
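    # Illustrative sketch (not part of the original source): the dict branch
    # above builds a single chained CASE WHEN expression. Replacing
    # {1: 1000, 2: 2000} in a hypothetical column "val" would look like:
    #
    #   from pyspark.sql import SparkSession, functions as F
    #
    #   spark = SparkSession.builder.getOrCreate()
    #   sdf = spark.createDataFrame([(0,), (1,), (2,), (3,)], ["val"])
    #   replaced = (F.when(F.col("val") == F.lit(1), 1000)
    #                .when(F.col("val") == F.lit(2), 2000)
    #                .otherwise(F.col("val")))
    #   sdf.withColumn("val", replaced).show()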
def update(self, other: "Series") -> None:
"""
Modify Series in place using non-NA values from passed Series. Aligns on index.
Parameters
----------
other : Series
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> s = ps.Series([1, 2, 3])
>>> s.update(ps.Series([4, 5, 6]))
>>> s.sort_index()
0 4
1 5
2 6
dtype: int64
>>> s = ps.Series(['a', 'b', 'c'])
>>> s.update(ps.Series(['d', 'e'], index=[0, 2]))
>>> s.sort_index()
0 d
1 b
2 e
dtype: object
>>> s = ps.Series([1, 2, 3])
>>> s.update(ps.Series([4, 5, 6, 7, 8]))
>>> s.sort_index()
0 4
1 5
2 6
dtype: int64
>>> s = ps.Series([1, 2, 3], index=[10, 11, 12])
>>> s
10 1
11 2
12 3
dtype: int64
>>> s.update(ps.Series([4, 5, 6]))
>>> s.sort_index()
10 1
11 2
12 3
dtype: int64
>>> s.update(ps.Series([4, 5, 6], index=[11, 12, 13]))
>>> s.sort_index()
10 1
11 4
12 5
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = ps.Series([1, 2, 3])
>>> s.update(ps.Series([4, np.nan, 6]))
>>> s.sort_index()
0 4.0
1 2.0
2 6.0
dtype: float64
>>> reset_option("compute.ops_on_diff_frames")
"""
if not isinstance(other, Series):
raise TypeError("'other' must be a Series")
combined = combine_frames(self._psdf, other._psdf, how="leftouter")
this_scol = combined["this"]._internal.spark_column_for(self._column_label)
that_scol = combined["that"]._internal.spark_column_for(other._column_label)
scol = (
F.when(that_scol.isNotNull(), that_scol)
.otherwise(this_scol)
.alias(self._psdf._internal.spark_column_name_for(self._column_label))
)
internal = combined["this"]._internal.with_new_spark_column(
self._column_label, scol # TODO: dtype?
)
self._psdf._update_internal_frame(internal.resolved_copy, requires_same_anchor=False)
def where(self, cond: "Series", other: Any = np.nan) -> "Series":
"""
Replace values where the condition is False.
Parameters
----------
cond : boolean Series
Where cond is True, keep the original value. Where False,
replace with corresponding value from other.
other : scalar, Series
Entries where cond is False are replaced with corresponding value from other.
Returns
-------
Series
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> s1 = ps.Series([0, 1, 2, 3, 4])
>>> s2 = ps.Series([100, 200, 300, 400, 500])
>>> s1.where(s1 > 0).sort_index()
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s1.where(s1 > 1, 10).sort_index()
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> s1.where(s1 > 1, s1 + 100).sort_index()
0 100
1 101
2 2
3 3
4 4
dtype: int64
>>> s1.where(s1 > 1, s2).sort_index()
0 100
1 200
2 2
3 3
4 4
dtype: int64
>>> reset_option("compute.ops_on_diff_frames")
"""
assert isinstance(cond, Series)
# We should check the DataFrame from both `cond` and `other`.
should_try_ops_on_diff_frame = not same_anchor(cond, self) or (
isinstance(other, Series) and not same_anchor(other, self)
)
if should_try_ops_on_diff_frame:
# Try to perform it with 'compute.ops_on_diff_frame' option.
psdf = self.to_frame()
tmp_cond_col = verify_temp_column_name(psdf, "__tmp_cond_col__")
tmp_other_col = verify_temp_column_name(psdf, "__tmp_other_col__")
psdf[tmp_cond_col] = cond
psdf[tmp_other_col] = other
            # the logic above makes the Spark DataFrame look like the one below:
# +-----------------+---+----------------+-----------------+
# |__index_level_0__| 0|__tmp_cond_col__|__tmp_other_col__|
# +-----------------+---+----------------+-----------------+
# | 0| 0| false| 100|
# | 1| 1| false| 200|
# | 3| 3| true| 400|
# | 2| 2| true| 300|
# | 4| 4| true| 500|
# +-----------------+---+----------------+-----------------+
condition = (
F.when(
psdf[tmp_cond_col].spark.column,
psdf._psser_for(psdf._internal.column_labels[0]).spark.column,
)
.otherwise(psdf[tmp_other_col].spark.column)
.alias(psdf._internal.data_spark_column_names[0])
)
internal = psdf._internal.with_new_columns(
[condition], column_labels=self._internal.column_labels
)
return first_series(DataFrame(internal))
else:
if isinstance(other, Series):
other = other.spark.column
condition = (
F.when(cond.spark.column, self.spark.column)
.otherwise(other)
.alias(self._internal.data_spark_column_names[0])
)
return self._with_new_scol(condition)
def mask(self, cond: "Series", other: Any = np.nan) -> "Series":
"""
Replace values where the condition is True.
Parameters
----------
cond : boolean Series
Where cond is False, keep the original value. Where True,
replace with corresponding value from other.
other : scalar, Series
Entries where cond is True are replaced with corresponding value from other.
Returns
-------
Series
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> s1 = ps.Series([0, 1, 2, 3, 4])
>>> s2 = ps.Series([100, 200, 300, 400, 500])
>>> s1.mask(s1 > 0).sort_index()
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s1.mask(s1 > 1, 10).sort_index()
0 0
1 1
2 10
3 10
4 10
dtype: int64
>>> s1.mask(s1 > 1, s1 + 100).sort_index()
0 0
1 1
2 102
3 103
4 104
dtype: int64
>>> s1.mask(s1 > 1, s2).sort_index()
0 0
1 1
2 300
3 400
4 500
dtype: int64
>>> reset_option("compute.ops_on_diff_frames")
"""
return self.where(cast(Series, ~cond), other)
def xs(self, key: Name, level: Optional[int] = None) -> "Series":
"""
Return cross-section from the Series.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
Returns
-------
Series
Cross-section from the original Series
corresponding to the selected index levels.
Examples
--------
>>> midx = pd.MultiIndex([['a', 'b', 'c'],
... ['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
a lama speed 45.0
weight 200.0
length 1.2
b cow speed 30.0
weight 250.0
length 1.5
c falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
Get values at specified index
>>> s.xs('a')
lama speed 45.0
weight 200.0
length 1.2
dtype: float64
Get values at several indexes
>>> s.xs(('a', 'lama'))
speed 45.0
weight 200.0
length 1.2
dtype: float64
Get values at specified index and level
>>> s.xs('lama', level=1)
a speed 45.0
weight 200.0
length 1.2
dtype: float64
"""
if not isinstance(key, tuple):
key = (key,)
if level is None:
level = 0
internal = self._internal
scols = (
internal.index_spark_columns[:level]
+ internal.index_spark_columns[level + len(key) :]
+ [self.spark.column]
)
rows = [internal.spark_columns[lvl] == index for lvl, index in enumerate(key, level)]
sdf = internal.spark_frame.filter(reduce(lambda x, y: x & y, rows)).select(scols)
if internal.index_level == len(key):
# if spark_frame has one column and one data, return data only without frame
pdf = sdf.limit(2).toPandas()
length = len(pdf)
if length == 1:
return pdf[self._internal.data_spark_column_names[0]].iloc[0]
index_spark_column_names = (
internal.index_spark_column_names[:level]
+ internal.index_spark_column_names[level + len(key) :]
)
index_names = internal.index_names[:level] + internal.index_names[level + len(key) :]
index_fields = internal.index_fields[:level] + internal.index_fields[level + len(key) :]
internal = internal.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_spark_column_names],
index_names=index_names,
index_fields=index_fields,
data_spark_columns=[scol_for(sdf, internal.data_spark_column_names[0])],
)
return first_series(DataFrame(internal))
def pct_change(self, periods: int = 1) -> "Series":
"""
Percentage change between the current and a prior element.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
Returns
-------
Series
Examples
--------
>>> psser = ps.Series([90, 91, 85], index=[2, 4, 1])
>>> psser
2 90
4 91
1 85
dtype: int64
>>> psser.pct_change()
2 NaN
4 0.011111
1 -0.065934
dtype: float64
>>> psser.sort_index().pct_change()
1 NaN
2 0.058824
4 0.011111
dtype: float64
>>> psser.pct_change(periods=2)
2 NaN
4 NaN
1 -0.055556
dtype: float64
"""
scol = self.spark.column
        window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME)
        prev_row = F.lag(scol, periods).over(window)
return self._with_new_scol((scol - prev_row) / prev_row).spark.analyzed
def combine_first(self, other: "Series") -> "Series":
"""
Combine Series values, choosing the calling Series's values first.
Parameters
----------
other : Series
The value(s) to be combined with the `Series`.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine : Perform elementwise operation on two Series
using a given function.
Notes
-----
Result index will be the union of the two indexes.
Examples
--------
>>> s1 = ps.Series([1, np.nan])
>>> s2 = ps.Series([3, 4])
>>> with ps.option_context("compute.ops_on_diff_frames", True):
... s1.combine_first(s2)
0 1.0
1 4.0
dtype: float64
"""
if not isinstance(other, ps.Series):
raise TypeError("`combine_first` only allows `Series` for parameter `other`")
if same_anchor(self, other):
this = self.spark.column
that = other.spark.column
combined = self._psdf
else:
combined = combine_frames(self._psdf, other._psdf)
this = combined["this"]._internal.spark_column_for(self._column_label)
that = combined["that"]._internal.spark_column_for(other._column_label)
# If `self` has missing value, use value of `other`
cond = F.when(this.isNull(), that).otherwise(this)
# If `self` and `other` come from same frame, the anchor should be kept
if same_anchor(self, other):
return self._with_new_scol(cond) # TODO: dtype?
index_scols = combined._internal.index_spark_columns
sdf = combined._internal.spark_frame.select(
*index_scols, cond.alias(self._internal.data_spark_column_names[0])
).distinct()
internal = self._internal.with_new_sdf(
sdf, index_fields=combined._internal.index_fields, data_fields=[None] # TODO: dtype?
)
return first_series(DataFrame(internal))
def dot(self, other: Union["Series", DataFrame]) -> Union[Scalar, "Series"]:
"""
Compute the dot product between the Series and the columns of other.
        This method computes the dot product between the Series and another
        one, or between the Series and each column of a DataFrame.
It can also be called using `self @ other` in Python >= 3.5.
.. note:: This API is slightly different from pandas when indexes from both Series
            are not aligned. Matching pandas' behavior would require reading the whole data,
            for example to count the mismatches. pandas raises an exception; pandas-on-Spark
            just proceeds, permissively treating mismatched indices as NaN.
>>> pdf1 = pd.Series([1, 2, 3], index=[0, 1, 2])
>>> pdf2 = pd.Series([1, 2, 3], index=[0, 1, 3])
>>> pdf1.dot(pdf2) # doctest: +SKIP
...
ValueError: matrices are not aligned
>>> psdf1 = ps.Series([1, 2, 3], index=[0, 1, 2])
>>> psdf2 = ps.Series([1, 2, 3], index=[0, 1, 3])
>>> psdf1.dot(psdf2) # doctest: +SKIP
5
Parameters
----------
other : Series, DataFrame.
The other object to compute the dot product with its columns.
Returns
-------
scalar, Series
            Return the dot product of the Series and other if other is a
            Series, or the Series of dot products between the Series and each
            column of other if other is a DataFrame.
Notes
-----
        The Series and other have to share the same index if other is a Series
        or a DataFrame.
Examples
--------
>>> s = ps.Series([0, 1, 2, 3])
>>> s.dot(s)
14
>>> s @ s
14
>>> psdf = ps.DataFrame({'x': [0, 1, 2, 3], 'y': [0, -1, -2, -3]})
>>> psdf
x y
0 0 0
1 1 -1
2 2 -2
3 3 -3
>>> with ps.option_context("compute.ops_on_diff_frames", True):
... s.dot(psdf)
...
x 14
y -14
dtype: int64
"""
if isinstance(other, DataFrame):
if not same_anchor(self, other):
if not self.index.sort_values().equals(other.index.sort_values()):
raise ValueError("matrices are not aligned")
other_copy = other.copy() # type: DataFrame
column_labels = other_copy._internal.column_labels
self_column_label = verify_temp_column_name(other_copy, "__self_column__")
other_copy[self_column_label] = self
self_psser = other_copy._psser_for(self_column_label)
product_pssers = [
cast(Series, other_copy._psser_for(label) * self_psser) for label in column_labels
]
dot_product_psser = DataFrame(
other_copy._internal.with_new_columns(product_pssers, column_labels=column_labels)
).sum()
return cast(Series, dot_product_psser).rename(self.name)
else:
assert isinstance(other, Series)
if not same_anchor(self, other):
if len(self.index) != len(other.index):
raise ValueError("matrices are not aligned")
return (self * other).sum()
def __matmul__(self, other: Union["Series", DataFrame]) -> Union[Scalar, "Series"]:
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def repeat(self, repeats: Union[int, "Series"]) -> "Series":
"""
Repeat elements of a Series.
Returns a new Series where each element of the current Series
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or Series
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
Series.
Returns
-------
Series
Newly created Series with repeated elements.
See Also
--------
Index.repeat : Equivalent function for Index.
Examples
--------
>>> s = ps.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
>>> s.repeat(2)
0 a
1 b
2 c
0 a
1 b
2 c
dtype: object
>>> ps.Series([1, 2, 3]).repeat(0)
Series([], dtype: int64)
"""
if not isinstance(repeats, (int, Series)):
raise TypeError(
"`repeats` argument must be integer or Series, but got {}".format(type(repeats))
)
if isinstance(repeats, Series):
if not same_anchor(self, repeats):
psdf = self.to_frame()
temp_repeats = verify_temp_column_name(psdf, "__temp_repeats__")
psdf[temp_repeats] = repeats
return (
psdf._psser_for(psdf._internal.column_labels[0])
.repeat(psdf[temp_repeats])
.rename(self.name)
)
else:
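                # Sketch of the approach used below: each value is wrapped in an array of
                # `repeats` copies (array_repeat) and then exploded back into rows, so the
                # row count multiplies element-wise by `repeats`.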
scol = F.explode(
F.array_repeat(self.spark.column, repeats.astype("int32").spark.column)
).alias(name_like_string(self.name))
sdf = self._internal.spark_frame.select(self._internal.index_spark_columns + [scol])
internal = self._internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
data_spark_columns=[scol_for(sdf, name_like_string(self.name))],
)
return first_series(DataFrame(internal))
else:
if repeats < 0:
raise ValueError("negative dimensions are not allowed")
psdf = self._psdf[[self.name]]
if repeats == 0:
return first_series(DataFrame(psdf._internal.with_filter(SF.lit(False))))
else:
return first_series(ps.concat([psdf] * repeats))
def asof(self, where: Union[Any, List]) -> Union[Scalar, "Series"]:
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
If there is no good value, NaN is returned.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
where : index or array-like of indices
Returns
-------
scalar or Series
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like
Return scalar or Series
Notes
-----
Indices are assumed to be sorted. Raises if this is not the case.
Examples
--------
>>> s = ps.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
A scalar `where`.
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20]).sort_index()
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
"""
should_return_series = True
if isinstance(self.index, ps.MultiIndex):
raise ValueError("asof is not supported for a MultiIndex")
if isinstance(where, (ps.Index, ps.Series, DataFrame)):
raise ValueError("where cannot be an Index, Series or a DataFrame")
if not self.index.is_monotonic_increasing:
raise ValueError("asof requires a sorted index")
if not is_list_like(where):
should_return_series = False
where = [where]
index_scol = self._internal.index_spark_columns[0]
index_type = self._internal.spark_type_for(index_scol)
cond = [
F.max(F.when(index_scol <= SF.lit(index).cast(index_type), self.spark.column))
for index in where
]
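        # Each entry of `cond` handles one `where` value: rows with index greater than the
        # cutoff are nulled out by F.when, and F.max ignores nulls, so the aggregate is
        # taken only over values at or before that cutoff.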
sdf = self._internal.spark_frame.select(cond)
if not should_return_series:
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
result = cast(pd.DataFrame, sdf.limit(1).toPandas()).iloc[0, 0]
return result if result is not None else np.nan
# The data is expected to be small so it's fine to transpose/use default index.
with ps.option_context("compute.default_index_type", "distributed", "compute.max_rows", 1):
psdf = ps.DataFrame(sdf) # type: DataFrame
psdf.columns = pd.Index(where)
return first_series(psdf.transpose()).rename(self.name)
def mad(self) -> float:
"""
Return the mean absolute deviation of values.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.mad()
1.0
"""
sdf = self._internal.spark_frame
spark_column = self.spark.column
avg = unpack_scalar(sdf.select(F.avg(spark_column)))
mad = unpack_scalar(sdf.select(F.avg(F.abs(spark_column - avg))))
return mad
def unstack(self, level: int = -1) -> DataFrame:
"""
Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
The level involved will automatically get sorted.
Notes
-----
Unlike pandas, pandas-on-Spark doesn't check whether an index is duplicated or not
because the checking of duplicated index requires scanning whole data which
can be quite expensive.
Parameters
----------
level : int, str, or list of these, default last level
Level(s) to unstack, can pass level name.
Returns
-------
DataFrame
Unstacked Series.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4],
... index=pd.MultiIndex.from_product([['one', 'two'],
... ['a', 'b']]))
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1).sort_index()
a b
one 1 2
two 3 4
>>> s.unstack(level=0).sort_index()
one two
a 1 3
b 2 4
"""
if not isinstance(self.index, ps.MultiIndex):
raise ValueError("Series.unstack only support for a MultiIndex")
index_nlevels = self.index.nlevels
if level > 0 and (level > index_nlevels - 1):
raise IndexError(
"Too many levels: Index has only {} levels, not {}".format(index_nlevels, level + 1)
)
elif level < 0 and (level < -index_nlevels):
raise IndexError(
"Too many levels: Index has only {} levels, {} is not a valid level number".format(
index_nlevels, level
)
)
internal = self._internal.resolved_copy
index_map = list(zip(internal.index_spark_column_names, internal.index_names))
pivot_col, column_label_names = index_map.pop(level)
index_scol_names, index_names = zip(*index_map)
col = internal.data_spark_column_names[0]
sdf = internal.spark_frame
sdf = sdf.groupby(list(index_scol_names)).pivot(pivot_col).agg(F.first(scol_for(sdf, col)))
internal = InternalFrame( # TODO: dtypes?
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_scol_names],
index_names=list(index_names),
column_label_names=[column_label_names],
)
return DataFrame(internal)
def item(self) -> Scalar:
"""
Return the first element of the underlying data as a Python scalar.
Returns
-------
scalar
The first element of Series.
Raises
------
ValueError
If the data is not length-1.
Examples
--------
>>> psser = ps.Series([10])
>>> psser.item()
10
"""
return self.head(2)._to_internal_pandas().item()
def iteritems(self) -> Iterable[Tuple[Name, Any]]:
"""
Lazily iterate over (index, value) tuples.
This method returns an iterable tuple (index, value). This is
convenient if you want to create a lazy iterator.
        .. note:: Unlike pandas, iteritems in pandas-on-Spark returns a generator rather
            than a zip object.
Returns
-------
iterable
Iterable of tuples containing the (index, value) pairs from a
Series.
See Also
--------
DataFrame.items : Iterate over (column name, Series) pairs.
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.
Examples
--------
>>> s = ps.Series(['A', 'B', 'C'])
>>> for index, value in s.items():
... print("Index : {}, Value : {}".format(index, value))
Index : 0, Value : A
Index : 1, Value : B
Index : 2, Value : C
"""
internal_index_columns = self._internal.index_spark_column_names
internal_data_column = self._internal.data_spark_column_names[0]
def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]:
k = (
row[internal_index_columns[0]]
if len(internal_index_columns) == 1
else tuple(row[c] for c in internal_index_columns)
)
v = row[internal_data_column]
return k, v
for k, v in map(
extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()
):
yield k, v
def items(self) -> Iterable[Tuple[Name, Any]]:
"""This is an alias of ``iteritems``."""
return self.iteritems()
def droplevel(self, level: Union[int, Name, List[Union[int, Name]]]) -> "Series":
"""
Return Series with requested index level(s) removed.
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
Returns
-------
Series
Series with requested index level(s) removed.
Examples
--------
>>> psser = ps.Series(
... [1, 2, 3],
... index=pd.MultiIndex.from_tuples(
... [("x", "a"), ("x", "b"), ("y", "c")], names=["level_1", "level_2"]
... ),
... )
>>> psser
level_1 level_2
x a 1
b 2
y c 3
dtype: int64
Removing specific index level by level
>>> psser.droplevel(0)
level_2
a 1
b 2
c 3
dtype: int64
Removing specific index level by name
>>> psser.droplevel("level_2")
level_1
x 1
x 2
y 3
dtype: int64
"""
return first_series(self.to_frame().droplevel(level=level, axis=0)).rename(self.name)
def tail(self, n: int = 5) -> "Series":
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `n` rows, equivalent to ``df[n:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> psser = ps.Series([1, 2, 3, 4, 5])
>>> psser
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> psser.tail(3) # doctest: +SKIP
2 3
3 4
4 5
dtype: int64
"""
return first_series(self.to_frame().tail(n=n)).rename(self.name)
def explode(self) -> "Series":
"""
Transform each element of a list-like to a row.
Returns
-------
Series
Exploded lists to rows; index will be duplicated for these rows.
See Also
--------
Series.str.split : Split string values on specified separator.
Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
to produce DataFrame.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
Examples
--------
>>> psser = ps.Series([[1, 2, 3], [], [3, 4]])
>>> psser
0 [1, 2, 3]
1 []
2 [3, 4]
dtype: object
>>> psser.explode() # doctest: +SKIP
0 1.0
0 2.0
0 3.0
1 NaN
2 3.0
2 4.0
dtype: float64
"""
if not isinstance(self.spark.data_type, ArrayType):
return self.copy()
scol = F.explode_outer(self.spark.column).alias(name_like_string(self._column_label))
internal = self._internal.with_new_columns([scol], keep_order=False)
return first_series(DataFrame(internal))
def argsort(self) -> "Series":
"""
Return the integer indices that would sort the Series values.
Unlike pandas, the index order is not preserved in the result.
Returns
-------
Series
Positions of values within the sort order with -1 indicating
nan values.
Examples
--------
>>> psser = ps.Series([3, 3, 4, 1, 6, 2, 3, 7, 8, 7, 10])
>>> psser
0 3
1 3
2 4
3 1
4 6
5 2
6 3
7 7
8 8
9 7
10 10
dtype: int64
>>> psser.argsort().sort_index()
0 3
1 5
2 0
3 1
4 6
5 2
6 4
7 7
8 9
9 8
10 10
dtype: int64
"""
notnull = self.loc[self.notnull()]
sdf_for_index = notnull._internal.spark_frame.select(notnull._internal.index_spark_columns)
tmp_join_key = verify_temp_column_name(sdf_for_index, "__tmp_join_key__")
sdf_for_index, _ = InternalFrame.attach_distributed_sequence_column(
sdf_for_index, tmp_join_key
)
# sdf_for_index:
# +----------------+-----------------+
# |__tmp_join_key__|__index_level_0__|
# +----------------+-----------------+
# | 0| 0|
# | 1| 1|
# | 2| 2|
# | 3| 3|
# | 4| 4|
# +----------------+-----------------+
sdf_for_data = notnull._internal.spark_frame.select(
notnull.spark.column.alias("values"), NATURAL_ORDER_COLUMN_NAME
)
sdf_for_data, _ = InternalFrame.attach_distributed_sequence_column(
sdf_for_data, SPARK_DEFAULT_SERIES_NAME
)
# sdf_for_data:
# +---+------+-----------------+
# | 0|values|__natural_order__|
# +---+------+-----------------+
# | 0| 3| 25769803776|
# | 1| 3| 51539607552|
# | 2| 4| 77309411328|
# | 3| 1| 103079215104|
# | 4| 2| 128849018880|
# +---+------+-----------------+
sdf_for_data = sdf_for_data.sort(
scol_for(sdf_for_data, "values"), NATURAL_ORDER_COLUMN_NAME
).drop("values", NATURAL_ORDER_COLUMN_NAME)
tmp_join_key = verify_temp_column_name(sdf_for_data, "__tmp_join_key__")
sdf_for_data, _ = InternalFrame.attach_distributed_sequence_column(
sdf_for_data, tmp_join_key
)
# sdf_for_index: sdf_for_data:
# +----------------+-----------------+ +----------------+---+
# |__tmp_join_key__|__index_level_0__| |__tmp_join_key__| 0|
# +----------------+-----------------+ +----------------+---+
# | 0| 0| | 0| 3|
# | 1| 1| | 1| 4|
# | 2| 2| | 2| 0|
# | 3| 3| | 3| 1|
# | 4| 4| | 4| 2|
# +----------------+-----------------+ +----------------+---+
sdf = sdf_for_index.join(sdf_for_data, on=tmp_join_key).drop(tmp_join_key)
internal = self._internal.with_new_sdf(
spark_frame=sdf,
data_columns=[SPARK_DEFAULT_SERIES_NAME],
index_fields=[
InternalField(dtype=field.dtype) for field in self._internal.index_fields
],
data_fields=[None],
)
psser = first_series(DataFrame(internal))
return cast(
Series,
ps.concat([psser, self.loc[self.isnull()].spark.transform(lambda _: SF.lit(-1))]),
)
def argmax(self) -> int:
"""
Return int position of the largest value in the Series.
If the maximum is achieved in multiple locations,
the first row position is returned.
Returns
-------
int
Row position of the maximum value.
Examples
--------
Consider dataset containing cereal calories
>>> s = ps.Series({'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0})
>>> s # doctest: +SKIP
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmax() # doctest: +SKIP
2
"""
sdf = self._internal.spark_frame.select(self.spark.column, NATURAL_ORDER_COLUMN_NAME)
max_value = sdf.select(
F.max(scol_for(sdf, self._internal.data_spark_column_names[0])),
F.first(NATURAL_ORDER_COLUMN_NAME),
).head()
if max_value[1] is None:
raise ValueError("attempt to get argmax of an empty sequence")
elif max_value[0] is None:
return -1
# We should remember the natural sequence started from 0
seq_col_name = verify_temp_column_name(sdf, "__distributed_sequence_column__")
sdf, _ = InternalFrame.attach_distributed_sequence_column(
sdf.drop(NATURAL_ORDER_COLUMN_NAME), seq_col_name
)
# If the maximum is achieved in multiple locations, the first row position is returned.
return sdf.filter(
scol_for(sdf, self._internal.data_spark_column_names[0]) == max_value[0]
).head()[0]
def argmin(self) -> int:
"""
Return int position of the smallest value in the Series.
If the minimum is achieved in multiple locations,
the first row position is returned.
Returns
-------
int
Row position of the minimum value.
Examples
--------
Consider dataset containing cereal calories
>>> s = ps.Series({'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0})
>>> s # doctest: +SKIP
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmin() # doctest: +SKIP
0
"""
sdf = self._internal.spark_frame.select(self.spark.column, NATURAL_ORDER_COLUMN_NAME)
min_value = sdf.select(
F.min(scol_for(sdf, self._internal.data_spark_column_names[0])),
F.first(NATURAL_ORDER_COLUMN_NAME),
).head()
if min_value[1] is None:
raise ValueError("attempt to get argmin of an empty sequence")
elif min_value[0] is None:
return -1
# We should remember the natural sequence started from 0
seq_col_name = verify_temp_column_name(sdf, "__distributed_sequence_column__")
sdf, _ = InternalFrame.attach_distributed_sequence_column(
sdf.drop(NATURAL_ORDER_COLUMN_NAME), seq_col_name
)
# If the minimum is achieved in multiple locations, the first row position is returned.
return sdf.filter(
scol_for(sdf, self._internal.data_spark_column_names[0]) == min_value[0]
).head()[0]
def compare(
self, other: "Series", keep_shape: bool = False, keep_equal: bool = False
) -> DataFrame:
"""
Compare to another Series and show the differences.
Parameters
----------
other : Series
Object to compare with.
keep_shape : bool, default False
If true, all rows and columns are kept.
Otherwise, only the ones with different values are kept.
keep_equal : bool, default False
If true, the result keeps values that are equal.
Otherwise, equal values are shown as NaNs.
Returns
-------
DataFrame
Notes
-----
Matching NaNs will not appear as a difference.
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> s1 = ps.Series(["a", "b", "c", "d", "e"])
>>> s2 = ps.Series(["a", "a", "c", "b", "e"])
Align the differences on columns
>>> s1.compare(s2).sort_index()
self other
1 b a
3 d b
Keep all original rows
>>> s1.compare(s2, keep_shape=True).sort_index()
self other
0 None None
1 b a
2 None None
3 d b
4 None None
Keep all original rows and also all original values
>>> s1.compare(s2, keep_shape=True, keep_equal=True).sort_index()
self other
0 a a
1 b a
2 c c
3 d b
4 e e
>>> reset_option("compute.ops_on_diff_frames")
"""
if same_anchor(self, other):
self_column_label = verify_temp_column_name(other.to_frame(), "__self_column__")
other_column_label = verify_temp_column_name(self.to_frame(), "__other_column__")
combined = DataFrame(
self._internal.with_new_columns(
[self.rename(self_column_label), other.rename(other_column_label)]
)
) # type: DataFrame
else:
if not self.index.equals(other.index):
raise ValueError("Can only compare identically-labeled Series objects")
combined = combine_frames(self.to_frame(), other.to_frame())
this_column_label = "self"
that_column_label = "other"
if keep_equal and keep_shape:
combined.columns = pd.Index([this_column_label, that_column_label])
return combined
this_data_scol = combined._internal.data_spark_columns[0]
that_data_scol = combined._internal.data_spark_columns[1]
index_scols = combined._internal.index_spark_columns
sdf = combined._internal.spark_frame
if keep_shape:
this_scol = (
F.when(this_data_scol == that_data_scol, None)
.otherwise(this_data_scol)
.alias(this_column_label)
)
this_field = combined._internal.data_fields[0].copy(
name=this_column_label, nullable=True
)
that_scol = (
F.when(this_data_scol == that_data_scol, None)
.otherwise(that_data_scol)
.alias(that_column_label)
)
that_field = combined._internal.data_fields[1].copy(
name=that_column_label, nullable=True
)
else:
sdf = sdf.filter(~this_data_scol.eqNullSafe(that_data_scol))
this_scol = this_data_scol.alias(this_column_label)
this_field = combined._internal.data_fields[0].copy(name=this_column_label)
that_scol = that_data_scol.alias(that_column_label)
that_field = combined._internal.data_fields[1].copy(name=that_column_label)
sdf = sdf.select(*index_scols, this_scol, that_scol, NATURAL_ORDER_COLUMN_NAME)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_fields=combined._internal.index_fields,
column_labels=[(this_column_label,), (that_column_label,)],
data_spark_columns=[scol_for(sdf, this_column_label), scol_for(sdf, that_column_label)],
data_fields=[this_field, that_field],
column_label_names=[None],
)
return DataFrame(internal)
def align(
self,
other: Union[DataFrame, "Series"],
join: str = "outer",
axis: Optional[Axis] = None,
copy: bool = True,
) -> Tuple["Series", Union[DataFrame, "Series"]]:
"""
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
Returns
-------
(left, right) : (Series, type of other)
Aligned objects.
Examples
--------
>>> ps.set_option("compute.ops_on_diff_frames", True)
>>> s1 = ps.Series([7, 8, 9], index=[10, 11, 12])
>>> s2 = ps.Series(["g", "h", "i"], index=[10, 20, 30])
>>> aligned_l, aligned_r = s1.align(s2)
>>> aligned_l.sort_index()
10 7.0
11 8.0
12 9.0
20 NaN
30 NaN
dtype: float64
>>> aligned_r.sort_index()
10 g
11 None
12 None
20 h
30 i
dtype: object
Align with the join type "inner":
>>> aligned_l, aligned_r = s1.align(s2, join="inner")
>>> aligned_l.sort_index()
10 7
dtype: int64
>>> aligned_r.sort_index()
10 g
dtype: object
Align with a DataFrame:
>>> df = ps.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
>>> aligned_l, aligned_r = s1.align(df)
>>> aligned_l.sort_index()
10 7.0
11 8.0
12 9.0
20 NaN
30 NaN
dtype: float64
>>> aligned_r.sort_index()
a b
10 1.0 a
11 NaN None
12 NaN None
20 2.0 b
30 3.0 c
>>> ps.reset_option("compute.ops_on_diff_frames")
"""
axis = validate_axis(axis)
if axis == 1:
raise ValueError("Series does not support columns axis.")
self_df = self.to_frame()
left, right = self_df.align(other, join=join, axis=axis, copy=False)
if left is self_df:
left_ser = self
else:
left_ser = first_series(left).rename(self.name)
return (left_ser.copy(), right.copy()) if copy else (left_ser, right)
def between_time(
self,
start_time: Union[datetime.time, str],
end_time: Union[datetime.time, str],
include_start: bool = True,
include_end: bool = True,
axis: Axis = 0,
) -> "Series":
"""
Select values between particular times of the day (example: 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
include_start : bool, default True
Whether the start time needs to be included in the result.
include_end : bool, default True
Whether the end time needs to be included in the result.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
Returns
-------
Series
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> psser = ps.Series([1, 2, 3, 4], index=idx)
>>> psser
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
dtype: int64
>>> psser.between_time('0:15', '0:45')
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
dtype: int64
"""
return first_series(
self.to_frame().between_time(start_time, end_time, include_start, include_end, axis)
).rename(self.name)
def at_time(
self, time: Union[datetime.time, str], asof: bool = False, axis: Axis = 0
) -> "Series":
"""
Select values at particular time of day (example: 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
Returns
-------
Series
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> idx = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> psser = ps.Series([1, 2, 3, 4], index=idx)
>>> psser
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
dtype: int64
>>> psser.at_time('12:00')
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
dtype: int64
"""
return first_series(self.to_frame().at_time(time, asof, axis)).rename(self.name)
def _cum(
self,
func: Callable[[Column], Column],
skipna: bool,
part_cols: Sequence["ColumnOrName"] = (),
ascending: bool = True,
) -> "Series":
# This is used to cummin, cummax, cumsum, etc.
if ascending:
window = (
Window.orderBy(F.asc(NATURAL_ORDER_COLUMN_NAME))
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
else:
window = (
Window.orderBy(F.desc(NATURAL_ORDER_COLUMN_NAME))
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
if skipna:
# There is a behavior difference between pandas and PySpark. In case of cummax,
#
# Input:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 1.0 0.0
# 3 2.0 4.0
# 4 4.0 9.0
#
# pandas:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
#
# PySpark:
# A B
# 0 2.0 1.0
# 1 5.0 1.0
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
scol = F.when(
# Manually sets nulls given the column defined above.
self.spark.column.isNull(),
SF.lit(None),
).otherwise(func(self.spark.column).over(window))
else:
# Here, we use two Windows.
# One for real data.
# The other one for setting nulls after the first null it meets.
#
# There is a behavior difference between pandas and PySpark. In case of cummax,
#
# Input:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 1.0 0.0
# 3 2.0 4.0
# 4 4.0 9.0
#
# pandas:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 5.0 NaN
# 3 5.0 NaN
# 4 5.0 NaN
#
# PySpark:
# A B
# 0 2.0 1.0
# 1 5.0 1.0
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
scol = F.when(
                # Taking the running max of the is-null flag becomes (and stays) True
                # once the first null has been met.
F.max(self.spark.column.isNull()).over(window),
# Manually sets nulls given the column defined above.
SF.lit(None),
).otherwise(func(self.spark.column).over(window))
return self._with_new_scol(scol)
def _cumsum(self, skipna: bool, part_cols: Sequence["ColumnOrName"] = ()) -> "Series":
psser = self
if isinstance(psser.spark.data_type, BooleanType):
psser = psser.spark.transform(lambda scol: scol.cast(LongType()))
elif not isinstance(psser.spark.data_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return psser._cum(F.sum, skipna, part_cols)
def _cumprod(self, skipna: bool, part_cols: Sequence["ColumnOrName"] = ()) -> "Series":
if isinstance(self.spark.data_type, BooleanType):
scol = self._cum(
lambda scol: F.min(F.coalesce(scol, SF.lit(True))), skipna, part_cols
).spark.column.cast(LongType())
elif isinstance(self.spark.data_type, NumericType):
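            # Sketch of the technique: the running product is rebuilt as
            # exp(cumsum(log(|x|))), with the sign recovered from the running count of
            # negative entries, and the result forced to 0 once a zero has been seen.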
num_zeros = self._cum(
lambda scol: F.sum(F.when(scol == 0, 1).otherwise(0)), skipna, part_cols
).spark.column
num_negatives = self._cum(
lambda scol: F.sum(F.when(scol < 0, 1).otherwise(0)), skipna, part_cols
).spark.column
sign = F.when(num_negatives % 2 == 0, 1).otherwise(-1)
abs_prod = F.exp(
self._cum(lambda scol: F.sum(F.log(F.abs(scol))), skipna, part_cols).spark.column
)
scol = F.when(num_zeros > 0, 0).otherwise(sign * abs_prod)
if isinstance(self.spark.data_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(self.spark.data_type),
self.spark.data_type.simpleString(),
)
)
return self._with_new_scol(scol)
# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
dt = CachedAccessor("dt", DatetimeMethods)
str = CachedAccessor("str", StringMethods)
cat = CachedAccessor("cat", CategoricalAccessor)
plot = CachedAccessor("plot", PandasOnSparkPlotAccessor)
# ----------------------------------------------------------------------
def _apply_series_op(
self, op: Callable[["Series"], Union["Series", Column]], should_resolve: bool = False
) -> "Series":
psser_or_scol = op(self)
if isinstance(psser_or_scol, Series):
psser = psser_or_scol
else:
psser = self._with_new_scol(cast(Column, psser_or_scol))
if should_resolve:
internal = psser._internal.resolved_copy
return first_series(DataFrame(internal))
else:
return psser
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str_type,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Scalar:
"""
Applies sfun to the column and returns a scalar
Parameters
----------
sfun : the stats function to be used for aggregation
name : original pandas API name.
axis : used only for sanity check because series only support index axis.
numeric_only : not used by this implementation, but passed down by stats functions
"""
from inspect import signature
axis = validate_axis(axis)
if axis == 1:
raise ValueError("Series does not support columns axis.")
num_args = len(signature(sfun).parameters)
spark_column = self.spark.column
spark_type = self.spark.data_type
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
scol = cast(Callable[[Column], Column], sfun)(spark_column)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
scol = cast(Callable[[Column, DataType], Column], sfun)(spark_column, spark_type)
min_count = kwargs.get("min_count", 0)
if min_count > 0:
scol = F.when(Frame._count_expr(spark_column, spark_type) >= min_count, scol)
result = unpack_scalar(self._internal.spark_frame.select(scol))
return result if result is not None else np.nan
# Override the `groupby` to specify the actual return type annotation.
def groupby(
self,
by: Union[Name, "Series", List[Union[Name, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "SeriesGroupBy":
return cast(
"SeriesGroupBy", super().groupby(by=by, axis=axis, as_index=as_index, dropna=dropna)
)
groupby.__doc__ = Frame.groupby.__doc__
def _build_groupby(
self, by: List[Union["Series", Label]], as_index: bool, dropna: bool
) -> "SeriesGroupBy":
from pyspark.pandas.groupby import SeriesGroupBy
return SeriesGroupBy._build(self, by, as_index=as_index, dropna=dropna)
def __getitem__(self, key: Any) -> Any:
try:
if (isinstance(key, slice) and any(type(n) == int for n in [key.start, key.stop])) or (
type(key) == int
and not isinstance(self.index.spark.data_type, (IntegerType, LongType))
):
                # pandas Series treats ints positionally when slicing with ints, but a
                # scalar int is looked up by label when the index itself is an integer
                # type (and positionally otherwise); mirror that behaviour here.
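                # For example (sketch of the dispatch above): with a string index, s[0]
                # and s[0:2] both go through .iloc; with an int64 index, s[0:2] still goes
                # through .iloc but a scalar s[0] falls through to the label lookup below.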
return self.iloc[key]
return self.loc[key]
except SparkPandasIndexingError:
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
len(key), self._internal.index_level
)
)
def __getattr__(self, item: str_type) -> Any:
if item.startswith("__"):
raise AttributeError(item)
if hasattr(MissingPandasLikeSeries, item):
property_or_func = getattr(MissingPandasLikeSeries, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
raise AttributeError("'Series' object has no attribute '{}'".format(item))
def _to_internal_pandas(self) -> pd.Series:
"""
Return a pandas Series directly from _internal to avoid overhead of copy.
This method is for internal use only.
"""
return self._psdf._internal.to_pandas_frame[self.name]
def __repr__(self) -> str_type:
max_display_count = get_option("display.max_rows")
if max_display_count is None:
return self._to_internal_pandas().to_string(name=self.name, dtype=self.dtype)
pser = self._psdf._get_or_create_repr_pandas_cache(max_display_count)[self.name]
pser_length = len(pser)
pser = pser.iloc[:max_display_count]
if pser_length > max_display_count:
repr_string = pser.to_string(length=True)
rest, prev_footer = repr_string.rsplit("\n", 1)
match = REPR_PATTERN.search(prev_footer)
if match is not None:
length = match.group("length")
dtype_name = str(self.dtype.name)
if self.name is None:
footer = "\ndtype: {dtype}\nShowing only the first {length}".format(
length=length, dtype=pprint_thing(dtype_name)
)
else:
footer = (
"\nName: {name}, dtype: {dtype}"
"\nShowing only the first {length}".format(
length=length, name=self.name, dtype=pprint_thing(dtype_name)
)
)
return rest + footer
return pser.to_string(name=self.name, dtype=self.dtype)
def __dir__(self) -> Iterable[str_type]:
if not isinstance(self.spark.data_type, StructType):
fields = []
else:
fields = [f for f in self.spark.data_type.fieldNames() if " " not in f]
return list(super().__dir__()) + fields
def __iter__(self) -> None:
return MissingPandasLikeSeries.__iter__(self)
if sys.version_info >= (3, 7):
# In order to support the type hints such as Series[...]. See DataFrame.__class_getitem__.
def __class_getitem__(cls, params: Any) -> Type[SeriesType]:
return _create_type_for_series_type(params)
elif (3, 5) <= sys.version_info < (3, 7):
# The implementation is in its metaclass so this flag is needed to distinguish
# pandas-on-Spark Series.
is_series = None
def unpack_scalar(sdf: SparkDataFrame) -> Any:
"""
Takes a dataframe that is supposed to contain a single row with a single scalar value,
and returns this value.
"""
l = cast(pd.DataFrame, sdf.limit(2).toPandas())
assert len(l) == 1, (sdf, l)
row = l.iloc[0]
l2 = list(row)
assert len(l2) == 1, (row, l2)
return l2[0]
@overload
def first_series(df: DataFrame) -> Series:
...
@overload
def first_series(df: pd.DataFrame) -> pd.Series:
...
def first_series(df: Union[DataFrame, pd.DataFrame]) -> Union[Series, pd.Series]:
"""
Takes a DataFrame and returns the first column of the DataFrame as a Series
"""
assert isinstance(df, (DataFrame, pd.DataFrame)), type(df)
if isinstance(df, DataFrame):
return df._psser_for(df._internal.column_labels[0])
else:
return df[df.columns[0]]
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.series
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.series.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.series tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.series,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ScottTest/MHDgenerator/MHDfluid.py | 1 | 10096 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
from dolfin import *
import mshr
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
import MHDprec as MHDpreconditioner
import memory_profiler
import gc
import MHDmulti
import MHDmatrixSetup as MHDsetup
import Generator
#@profile
m = 2
set_log_active(False)
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
for xx in xrange(1,m):
print xx
level[xx-1] = xx + 1
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
parameters["form_compiler"]["quadrature_degree"] = -1
mesh, boundaries, domains = Generator.Domain(nn)
order = 2
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "CG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order-1)
Lagrange = FunctionSpace(mesh, "CG", order-1)
W = MixedFunctionSpace([Velocity, Pressure, Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
kappa = 1.0
Mu_m = 10.0
MU = 1.0
N = FacetNormal(mesh)
IterType = 'Full'
Split = "No"
Saddle = "No"
Stokes = "No"
SetupType = 'python-class'
params = [kappa,Mu_m,MU]
F_M = Expression(("0.0","0.0","0.0"))
F_S = Expression(("0.0","0.0","0.0"))
n = FacetNormal(mesh)
r0 = Expression(("0.0"))
b0 = Expression(("1.0", "0.0", "0.0"))
u0 = Expression(("1.0", "0", "0.0"))
Hiptmairtol = 1e-6
HiptmairMatrices = PrecondSetup.MagneticSetup(Magnetic, Lagrange, b0, r0, Hiptmairtol, params)
B0 = 1.
delta = 0.1
x_on = 4.
x_off = 6.
u0, p0, b0, r0, Laplacian, Advection, gradPres, NScouple, CurlCurl, gradLagr, Mcouple = Generator.ExactSolution(params, B0, delta, x_on, x_off)
F_NS = -MU*Laplacian + Advection + gradPres - kappa*NScouple
if kappa == 0.0:
F_M = Mu_m*CurlCurl + gradLagr - kappa*Mcouple
else:
F_M = Mu_m*kappa*CurlCurl + gradLagr - kappa*Mcouple
MO.PrintStr("Seting up initial guess matricies",2,"=","\n\n","\n")
u_k, p_k = Generator.Stokes(Velocity, Pressure, F_S, u0, params, boundaries, domains)
b_k, r_k = Generator.Maxwell(Magnetic, Lagrange, F_M, b0, params, HiptmairMatrices, Hiptmairtol)
x = Iter.u_prev(u_k,p_k,b_k,r_k)
(u, p, b, r) = TrialFunctions(W)
(v, q, c, s) = TestFunctions(W)
m11 = params[1]*params[0]*inner(curl(b),curl(c))*dx
m21 = inner(c,grad(r))*dx
m12 = inner(b,grad(s))*dx
a11 = params[2]*inner(grad(v), grad(u))*dx + inner((grad(u)*u_k),v)*dx + (1./2)*div(u_k)*inner(u,v)*dx - (1./2)*inner(u_k,n)*inner(u,v)*ds
a12 = -div(v)*p*dx
a21 = -div(u)*q*dx
CoupleT = params[0]*inner(cross(v,b_k),curl(b))*dx
Couple = -params[0]*inner(cross(u,b_k),curl(c))*dx
a = m11 + m12 + m21 + a11 + a21 + a12 + Couple + CoupleT
Lns = inner(F_S, v)*dx #+ inner(Neumann,v)*ds(2)
Lmaxwell = inner(F_M, c)*dx
m11 = params[1]*params[0]*inner(curl(b_k),curl(c))*dx
m21 = inner(c,grad(r_k))*dx
m12 = inner(b_k,grad(s))*dx
a11 = params[2]*inner(grad(v), grad(u_k))*dx + inner((grad(u_k)*u_k),v)*dx + (1./2)*div(u_k)*inner(u_k,v)*dx - (1./2)*inner(u_k,n)*inner(u_k,v)*ds
a12 = -div(v)*p_k*dx
a21 = -div(u_k)*q*dx
CoupleT = params[0]*inner(cross(v,b_k),curl(b_k))*dx
Couple = -params[0]*inner(cross(u_k,b_k),curl(c))*dx
L = Lns + Lmaxwell - (m11 + m12 + m21 + a11 + a21 + a12 + Couple + CoupleT)
# u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,HiptmairMatrices,1e-10,Neumann=None,options ="New")
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
# pConst = - assemble(p_k*dx)/assemble(ones*dx)
# p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx)
KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
#plot(b_k)
IS = MO.IndexSet(W, 'Blocks')
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 10 # max no of iterations allowed
SolutionTime = 0
outer = 0
u_is = PETSc.IS().createGeneral(range(Velocity.dim()))
NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
OuterTol = 1e-5
InnerTol = 1e-5
NSits =0
Mits =0
TotalStart =time.time()
SolutionTime = 0
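    # Picard (fixed-point) iteration: each pass re-assembles the linearised MHD system
    # about the current iterate, solves it, updates (u_k, p_k, b_k, r_k), and stops once
    # the change `eps` drops below `tol` or `maxiter` is reached.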
while eps > tol and iter < maxiter:
iter += 1
MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
bcu1 = DirichletBC(W.sub(0), Expression(("0.0","0.0","0.0")), boundaries, 1)
bcu2 = DirichletBC(W.sub(0), Expression(("0.0","0.0","0.0")), boundaries, 2)
bcb = DirichletBC(W.sub(2), Expression(("0.0","0.0","0.0")), boundary)
bcr = DirichletBC(W.sub(3), Expression("0.0"), boundary)
bcs = [bcu1, bcu2, bcb, bcr]
A, b = assemble_system(a, L, bcs)
# if iter == 2:
# ss
A, b = CP.Assemble(A,b)
u = b.duplicate()
n = FacetNormal(mesh)
b_t = TrialFunction(Velocity)
c_t = TestFunction(Velocity)
mat = as_matrix([[b_k[2]*b_k[2]+b_k[1]*b_k[1],-b_k[1]*b_k[0],-b_k[0]*b_k[2]],
[-b_k[1]*b_k[0],b_k[0]*b_k[0]+b_k[2]*b_k[2],-b_k[2]*b_k[1]],
[-b_k[0]*b_k[2],-b_k[1]*b_k[2],b_k[0]*b_k[0]+b_k[1]*b_k[1]]])
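        # `mat` is (as the forms suggest) |b_k|^2 I - b_k b_k^T, i.e. the matrix form of
        # b_k x (u x b_k), used to assemble the shifted mass operator for the
        # preconditioner below.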
aa = params[2]*inner(grad(b_t), grad(c_t))*dx(W.mesh()) + inner((grad(b_t)*u_k),c_t)*dx(W.mesh()) +(1./2)*div(u_k)*inner(c_t,b_t)*dx(W.mesh()) - (1./2)*inner(u_k,n)*inner(c_t,b_t)*ds(W.mesh())+kappa/Mu_m*inner(mat*b_t,c_t)*dx(W.mesh())
ShiftedMass = assemble(aa)
for bc in [bcu1, bcu2]:
bc.apply(ShiftedMass)
ShiftedMass = CP.Assemble(ShiftedMass)
kspF = NSprecondSetup.LSCKSPnonlinear(ShiftedMass)
stime = time.time()
u, mits,nsits = S.solve(A,b,u,params,W,'Direct',IterType,OuterTol,InnerTol,HiptmairMatrices,Hiptmairtol,KSPlinearfluids, Fp,kspF)
Soltime = time.time()- stime
MO.StrTimePrint("MHD solve, time: ", Soltime)
Mits += mits
NSits += nsits
SolutionTime += Soltime
# print x.array + u.array
u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
u_k.assign(u1)
p_k.assign(p1)
b_k.assign(b1)
r_k.assign(r1)
uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
x = IO.arrayToVec(uOld)
SolTime[xx-1] = SolutionTime/iter
NSave[xx-1] = (float(NSits)/iter)
Mave[xx-1] = (float(Mits)/iter)
iterations[xx-1] = iter
TotalTime[xx-1] = time.time() - TotalStart
XX= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(),Lagrange.dim()]
import pandas as pd
print "\n\n Iteration table"
if IterType == "Full":
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
else:
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,Mave,NSave),axis=1)
IterTable= pd.DataFrame(IterValues, columns = IterTitles)
if IterType == "Full":
IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
else:
IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
print IterTable.to_latex()
b = b_k.vector().array()
b = b/np.linalg.norm(b)
B = Function(Magnetic)
B.vector()[:] = b
p = plot(u_k)
p.write_png()
p = plot(p_k)
p.write_png()
p = plot(b_k)
p.write_png()
p = plot(r_k)
p.write_png()
file1 = File("solutions/u.pvd")
file2 = File("solutions/p.pvd")
file3 = File("solutions/b.pvd")
file4 = File("solutions/r.pvd")
file1 << u_k
file2 << p_k
file3 << b_k
file4 << r_k
ssss  # NOTE: undefined name -- raises NameError, apparently left in deliberately to halt before interactive()
interactive()
| mit |
costypetrisor/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# univariate feature scores, used below to check that the best features are recovered
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
ioam/svn-history | topo/tkgui/topoconsole.py | 1 | 29735 | """
TopoConsole class file.
$Id$
"""
__version__='$Revision$'
# CB: does the status bar need to keep saying 'ok'? Sometimes
# positive feedback is useful, but 'ok' doesn't seem too helpful.
import os
import copy
import sys
import __main__
import webbrowser
import string
from Tkinter import Frame, Button, \
LEFT, YES, Label, DISABLED, \
NORMAL, DoubleVar
from tkFileDialog import asksaveasfilename,askopenfilename
import param
from param import normalize_path,resolve_path
import paramtk as tk
import topo
from topo.plotting.plotgroup import plotgroups, FeatureCurvePlotGroup
from topo.misc.keyedlist import KeyedList
from topo.misc.commandline import sim_name_from_filename
import topo.misc.genexamples
import topo.command
import topo.tkgui
from templateplotgrouppanel import TemplatePlotGroupPanel
from featurecurvepanel import FeatureCurvePanel
from projectionpanel import CFProjectionPanel,ProjectionActivityPanel,ConnectionFieldsPanel,RFProjectionPanel
from testpattern import TestPattern
from editor import ModelEditor
tk.AppWindow.window_icon_path = resolve_path('tkgui/icons/topo.xbm')
SCRIPT_FILETYPES = [('Topographica scripts','*.ty'),
('Python scripts','*.py'),
('All files','*')]
SAVED_FILE_EXTENSION = '.typ'
SAVED_FILETYPES = [('Topographica saved networks',
'*'+SAVED_FILE_EXTENSION),
('All files','*')]
turl = "http://topographica.org/"
userman = "User_Manual/index.html"
tuts = "Tutorials/index.html"
refman = "Reference_Manual/index.html"
plotman = "User_Manual/plotting.html"
# for deb on ubuntu; will need to check others
pkgdoc = "/usr/share/doc/topographica/doc/"
# Documentation locations: locally built and web urls.
user_manual_locations = ('doc/'+userman,
pkgdoc+userman,
turl+userman)
tutorials_locations = ('doc/'+tuts,
pkgdoc+tuts,
turl+tuts)
reference_manual_locations = ('doc/'+refman,
pkgdoc+refman,
turl+refman)
python_doc_locations = ('http://www.python.org/doc/',)
topo_www_locations = (turl,)
plotting_help_locations = ('doc/'+plotman,
pkgdoc+plotman,
turl+plotman)
# If a particular plotgroup_template needs (or works better with) a
# specific subclass of PlotPanel, the writer of the new subclass
# or the plotgroup_template can declare here that that template
# should use a specific PlotPanel subclass. For example:
# plotpanel_classes['Hue Pref Map'] = HuePreferencePanel
plotpanel_classes = {}
# CEBALERT: why are the other plotpanel_classes updates at the end of this file?
def open_plotgroup_panel(class_,plotgroup=None,**kw):
if class_.valid_context():
win = topo.guimain.some_area.new_window()
panel = class_(win,plotgroup=plotgroup,**kw)
if not panel.dock:
topo.guimain.some_area.eject(win)
else:
topo.guimain.some_area.consume(win)
panel.refresh_title()
panel.pack(expand='yes',fill='both')
win.sizeright()
#frame.sizeright()
#topo.guimain.messageBar.message('state', 'OK')
return panel
else:
topo.guimain.messageBar.response(
'No suitable objects in this simulation for this operation.')
class PlotsMenuEntry(param.Parameterized):
"""
Stores information about a Plots menu command
(including the command itself, and the plotgroup template).
"""
def __init__(self,plotgroup,class_=TemplatePlotGroupPanel,**params):
"""
Store the template, and set the class that will be created by this menu entry
If users want to extend the Plot Panel classes, then they
should add entries to the plotpanel_classes dictionary.
If no entry is defined there, then the default class is used.
The class_ is overridden for any special cases listed in this method.
"""
super(PlotsMenuEntry,self).__init__(**params)
self.plotgroup = plotgroup
# Special cases. These classes are specific to the topo/tkgui
# directory and therefore this link must be made within the tkgui
# files.
if isinstance(self.plotgroup,FeatureCurvePlotGroup):
class_ = plotpanel_classes.get(self.plotgroup.name,FeatureCurvePanel)
self.class_ = plotpanel_classes.get(self.plotgroup.name,class_)
def __call__(self,event=None,**kw):
"""
Instantiate the class_ (used as menu commands' 'command' attribute).
Keyword args are passed to the class_.
"""
new_plotgroup = copy.deepcopy(self.plotgroup)
# CB: hack to share plot_templates with the current
# plotgroup in plotgroups
new_plotgroup.plot_templates = topo.plotting.plotgroup.plotgroups[self.plotgroup.name].plot_templates
return open_plotgroup_panel(self.class_,new_plotgroup,**kw)
# Notebook only available for Tkinter>=8.5
try:
from paramtk.tilewrapper import Notebook
class DockManager(Notebook):
"""Manages windows that can be tabs in a notebook, or toplevels."""
def __init__(self, master=None, cnf={}, **kw):
Notebook.__init__(self, master, cnf=cnf, **kw)
self._tab_ids = {}
def _set_tab_title(self,win,title):
self.tab(self._tab_ids[win],text=title)
def _set_toplevel_title(self,win,title):
prefix = topo.sim.name+": "
if not title.startswith(prefix):
title=prefix+title
self.tk.call("wm","title",win._w,title)
def add(self, child, cnf={}, **kw):
self._tab_ids[child]=len(self.tabs())
Notebook.add(self,child,cnf=cnf,**kw)
## def unhide(self,win):
## if win in self._tab_ids:
## self.tab(self._tab_ids[win],state='normal')
def new_window(self):
win = tk.AppWindow(self,status=True)
#self.consume(win)
return win
def consume(self,win):
if win not in self._tab_ids:
self.tk.call('wm','forget',win._w)
win.title = lambda x: self._set_tab_title(win,x)
self.add(win)
def eject(self,win):
if win in self._tab_ids:
self.forget(self._tab_ids[win])
# manage my tab ids (HACK)
del self._tab_ids[win]
for w in self._tab_ids:
self._tab_ids[w]-=1
self._tab_ids[w]=max(self._tab_ids[w],0)
self.tk.call('wm','manage',win._w)
win.renew()
win.title = lambda x: self._set_toplevel_title(win,x)
return win
except ImportError:
class FakeDockManager(Frame):
def _set_tab_title(self,*args):
pass
def _set_toplevel_title(self,win,title):
prefix = topo.sim.name+": "
if not title.startswith(prefix):
title=prefix+title
self.tk.call("wm","title",win._w,title)
def add(self,*args):
pass
def new_window(self):
win = tk.AppWindow(self,status=True)
return win
def consume(self,win):
pass
def eject(self,win):
win.renew()
win.title = lambda x: self._set_toplevel_title(win,x)
return win
DockManager = FakeDockManager
# This is really a hack. There doesn't seem to be any easy way to tie
# an exception to the window from which it originated. (I couldn't
# find an example of tkinter software displaying a gui exception on
# the originating window.)
def _tkinter_report_exception(widget):
exc, val, tb = sys.exc_type, sys.exc_value, sys.exc_traceback
msg = "(%s) %s"%(exc.__name__,val)
# If the supplied widget has no master, it's probably the Tk
# instance. In that case, resort to the 'last-one-set' hack (see
# CEBALERT "provide a way of allowing other gui components" in
# topo/param/tk.py).
if not widget.master:
widget = tk._last_one_set
stat = None
while (widget is not None and widget.master):
# CEBALERT: should rename all status bars to the same thing
# (status_bar)
if hasattr(widget,'status'):
stat = widget.status
break
elif hasattr(widget,'messageBar'):
stat = widget.messageBar
break
widget = widget.master
if stat is not None:
stat.error('%s'%msg)
else:
topo.guimain.messageBar.error('%s'%msg)
# BK-NOTE: Default is now to display full trace always. Any user
# errors should be caught as special exception cases
# BK-ALERT: Want to raise errors vs print, however this currently crashes ipython.
#raise
param.Parameterized().warning(msg)
import traceback
traceback.print_exc()
import Tkinter
class TopoConsole(tk.AppWindow,tk.TkParameterized):
"""
Main window for the Tk-based GUI.
"""
def _getmenubar(self):
return self.master.menubar
menubar = property(_getmenubar)
def __getitem__(self,menu_name):
"""Allow dictionary-style access to the menu bar."""
return self.menubar[menu_name]
def __init__(self,root,**params):
tk.AppWindow.__init__(self,root,status=True)
tk.TkParameterized.__init__(self,root,**params)
# Instead of displaying tracebacks on the commandline, try to display
# them on the originating window.
# CEBALERT: on destroy(), ought to revert this
Tkinter.Misc._report_exception=_tkinter_report_exception
self.auto_refresh_panels = []
self._init_widgets()
self.title(topo.sim.name) # If -g passed *before* scripts on commandline, this is useless.
# So topo.misc.commandline sets the title as its last action (if -g)
# catch click on the 'x': offers choice to quit or not
self.protocol("WM_DELETE_WINDOW",self.quit_topographica)
##########
### Make cascade menus open automatically on linux when the mouse
### is over the menu title.
### [Tkinter-discuss] Cascade menu issue
### http://mail.python.org/pipermail/tkinter-discuss/2006-August/000864.html
        if topo.tkgui.system_platform == 'linux':
activate_cascade = """\
if {[%W cget -type] != {menubar} && [%W type active] == {cascade}} {
%W postcascade active
}
"""
self.bind_class("Menu", "<<MenuSelect>>", activate_cascade)
##########
# Install warning and message handling
from param.parameterized import Parameterized
self.__orig_P_warning = Parameterized.warning
self.__orig_P_message = Parameterized.message
type.__setattr__(Parameterized,'warning',self.gui_warning)
type.__setattr__(Parameterized,'message',self.gui_message)
def gui_warning(self,*args):
stat = self.__get_status_bar()
s = string.join(args,' ')
stat.warn(s)
self.__orig_P_warning(self,*args)
def gui_message(self,*args):
stat = self.__get_status_bar()
s = string.join(args,' ')
stat.message(s)
self.__orig_P_message(self,*args)
def title(self,t=None):
newtitle = "Topographica"
if t: newtitle+=": %s" % t
tk.AppWindow.title(self,newtitle)
def _init_widgets(self):
## CEBALERT: now we can have multiple operations at the same time,
## status bar could be improved to show all tasks?
# CEBALERT
self.messageBar = self.status
self.some_area = DockManager(self)
self.some_area.pack(fill="both", expand=1)
### Balloon, for pop-up help
self.balloon = tk.Balloon(self.content)
### Top-level (native) menu bar
#self.menubar = tk.ControllableMenu(self.content)
self.configure(menu=self.menubar)
#self.menu_balloon = Balloon(topo.tkgui.root)
# no menubar in tile yet
# http://news.hping.org/comp.lang.tcl.archive/4679.html
self.__simulation_menu()
self.__create_plots_menu()
self.refresh_plots_menu()
self.__help_menu()
### Running the simulation
run_frame = Frame(self.content)
run_frame.pack(side='top',fill='x',padx=4,pady=8)
self.run_frame = run_frame
Label(run_frame,text='Run for: ').pack(side=LEFT)
self.run_for_var=DoubleVar()
self.run_for_var.set(1.0)
run_for = tk.TaggedSlider(run_frame,
variable=self.run_for_var,
tag_width=11,
slider_length=150,
bounds=(0,20000))
self.balloon.bind(run_for,"Duration to run the simulation, e.g. 0.0500, 1.0, or 20000.")
run_for.pack(side=LEFT,fill='x',expand=YES)
run_for.tag.bind("<Return>",self.run_simulation)
# When return is pressed, the TaggedSlider updates itself...but we also want to run
# the simulation in this case.
run_frame.optional_action=self.run_simulation
go_button = Button(run_frame,text="Go",
command=self.run_simulation)
go_button.pack(side=LEFT)
self.balloon.bind(go_button,"Run the simulation for the specified duration.")
self.step_button = Button(run_frame,text="Step",command=self.run_step)
self.balloon.bind(self.step_button,"Run the simulation through the time at which the next events are processed.")
self.step_button.pack(side=LEFT)
self.sizeright()
def __simulation_menu(self):
"""Add the simulation menu options to the menubar."""
simulation_menu = ControllableMenu(self.menubar,tearoff=0)
self.menubar.add_cascade(label='Simulation',menu=simulation_menu)
simulation_menu.add_command(label='Run script',command=self.run_script)
simulation_menu.add_command(label='Save script',command=self.save_script_repr)
simulation_menu.add_command(label='Load snapshot',command=self.load_snapshot)
simulation_menu.add_command(label='Save snapshot',command=self.save_snapshot)
#simulation_menu.add_command(label='Reset',command=self.reset_network)
simulation_menu.add_command(label='Test Pattern',command=self.open_test_pattern)
simulation_menu.add_command(label='Model Editor',command=self.open_model_editor)
simulation_menu.add_command(label='Quit',command=self.quit_topographica)
def open_test_pattern(self):
return open_plotgroup_panel(TestPattern)
def __create_plots_menu(self):
"""
Add the plot menu to the menubar, with Basic plots on the menu itself and
others in cascades by category (the plots come from plotgroup_templates).
"""
plots_menu = ControllableMenu(self.menubar,tearoff=0)
self.menubar.add_cascade(label='Plots',menu=plots_menu)
# CEBALERT: should split other menus in same way as plots (create/refresh)
def refresh_plots_menu(self):
plots_menu = self['Plots']
plots_menu.delete(0,'end')
# create menu entries, and get list of categories
entries=KeyedList() # keep the order of plotgroup_templates (which is also KL)
categories = []
for label,plotgroup in plotgroups.items():
entries[label] = PlotsMenuEntry(plotgroup)
categories.append(plotgroup.category)
categories = sorted(set(categories))
# The Basic category items appear on the menu itself.
assert 'Basic' in categories, "'Basic' is the category for the standard Plots menu entries."
for label,entry in entries:
if entry.plotgroup.category=='Basic':
plots_menu.add_command(label=label,command=entry.__call__)
categories.remove('Basic')
plots_menu.add_separator()
# Add the other categories to the menu as cascades, and the plots of each category to
# their cascades.
for category in categories:
category_menu = ControllableMenu(plots_menu,tearoff=0)
plots_menu.add_cascade(label=category,menu=category_menu)
# could probably search more efficiently than this
for label,entry in sorted(entries):
if entry.plotgroup.category==category:
category_menu.add_command(label=label,command=entry.__call__)
plots_menu.add_separator()
plots_menu.add_command(label="Help",command=(lambda x=plotting_help_locations: self.open_location(x)))
def __help_menu(self):
"""Add the help menu options."""
help_menu = ControllableMenu(self.menubar,tearoff=0,name='help')
self.menubar.add_cascade(label='Help',menu=help_menu)
help_menu.add_command(label='About',command=self.new_about_window)
help_menu.add_command(label="User Manual",
command=(lambda x=user_manual_locations: self.open_location(x)))
help_menu.add_command(label="Tutorials",
command=(lambda x=tutorials_locations: self.open_location(x)))
help_menu.add_command(label="Examples",
command=self.run_example_script)
help_menu.add_command(label="Reference Manual",
command=(lambda x=reference_manual_locations: self.open_location(x)))
help_menu.add_command(label="Topographica.org",
command=(lambda x=topo_www_locations: self.open_location(x)))
help_menu.add_command(label="Python documentation",
command=(lambda x=python_doc_locations: self.open_location(x)))
def quit_topographica(self,check=True,exit_status=0):
"""Quit topographica."""
if not check or (check and tk.askyesno("Quit Topographica","Really quit?")):
self.destroy()
# matplotlib's tk backend starts its own Tk instances; we
# need to close these ourselves (at least to avoid error
# message about 'unusual termination' in Windows).
try: # not that there should be an error, but just in case...
import matplotlib._pylab_helpers
for figman in matplotlib._pylab_helpers.Gcf.get_all_fig_managers():
figman.destroy()
except:
pass
print "Quit selected; exiting"
# Workaround for obscure problem on some UNIX systems
# as of 4/2007, probably including Fedora Core 5.
# On these systems, if Topographica is started from a
# bash prompt and then quit from the Tkinter GUI (as
# opposed to using Ctrl-D in the terminal), the
# terminal would suppress echoing of all future user
# input. stty sane restores the terminal to sanity,
# but it is not clear why this is necessary.
# For more info:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/68d0f33c8eb2e02d
if topo.tkgui.system_platform=="linux" and os.getenv('EMACS')!='t':
try: os.system("stty sane")
except: pass
# CEBALERT: re. above. Shouldn't we be able to store the
# output of "stty --save" before starting the gui, then
# ensure that when the gui exits (however badly it
# happens) run "stty saved_settings"?
# CEBALERT: there was no call to self.master.destroy()
sys.exit(exit_status)
def run_script(self):
"""
Dialog to run a user-selected script
The script is exec'd in __main__.__dict__ (i.e. as if it were specified on the commandline.)
"""
script = askopenfilename(initialdir=normalize_path(),filetypes=SCRIPT_FILETYPES)
if script in ('',(),None): # (representing the various ways no script was selected in the dialog)
self.messageBar.response('Run canceled')
else:
execfile(script,__main__.__dict__)
self.messageBar.response('Ran ' + script)
sim_name_from_filename(script)
self.title(topo.sim.name)
# CEBALERT: duplicates most of run_script()
def run_example_script(self):
script = askopenfilename(initialdir=topo.misc.genexamples.find_examples(),
filetypes=SCRIPT_FILETYPES)
if script in ('',(),None): # (representing the various ways no script was selected in the dialog)
self.messageBar.response('No example opened')
else:
execfile(script,__main__.__dict__)
self.messageBar.response('Ran ' + script)
sim_name_from_filename(script)
self.title(topo.sim.name)
def save_script_repr(self):
script_name = asksaveasfilename(filetypes=SCRIPT_FILETYPES,
initialdir=normalize_path(),
initialfile=topo.sim.basename()+"_script_repr.ty")
if script_name:
topo.command.save_script_repr(script_name)
self.messageBar.response('Script saved to ' + script_name)
def load_snapshot(self):
"""
Dialog to load a user-selected snapshot (see topo.command.load_snapshot() ).
"""
snapshot_name = askopenfilename(initialdir=normalize_path(),filetypes=SAVED_FILETYPES)
if snapshot_name in ('',(),None):
self.messageBar.response('No snapshot loaded.')
else:
self.messageBar.dynamicinfo('Loading snapshot (may take some time)...')
self.update_idletasks()
topo.command.load_snapshot(snapshot_name)
self.messageBar.response('Loaded snapshot ' + snapshot_name)
self.title(topo.sim.name)
self.auto_refresh()
def save_snapshot(self):
"""
Dialog to save a snapshot (see topo.command.save_snapshot() ).
Adds the file extension .typ if not already present.
"""
snapshot_name = asksaveasfilename(filetypes=SAVED_FILETYPES,
initialdir=normalize_path(),
initialfile=topo.sim.basename()+".typ")
if snapshot_name in ('',(),None):
self.messageBar.response('No snapshot saved.')
else:
            if not snapshot_name.endswith(SAVED_FILE_EXTENSION):
snapshot_name = snapshot_name + SAVED_FILE_EXTENSION
self.messageBar.dynamicinfo('Saving snapshot (may take some time)...')
self.update_idletasks()
topo.command.save_snapshot(snapshot_name)
self.messageBar.response('Snapshot saved to ' + snapshot_name)
def auto_refresh(self):
"""
Refresh all windows in auto_refresh_panels.
Panels can add and remove themselves to the list; those in the list
will have their refresh() method called whenever this console's
autorefresh() is called.
"""
for win in self.auto_refresh_panels:
win.refresh()
self.set_step_button_state()
self.update_idletasks()
### CEBERRORALERT: why doesn't updatecommand("display=True") for an
### orientation preference map measurement work with the
### hierarchical example? I guess this is the reason I thought the
### updating never worked properly (or I really did break it
### recently - or I'm confused)...
def refresh_activity_windows(self):
"""
Update any windows with a plotgroup_key of 'Activity'.
Used primarily for debugging long scripts that present a lot of activity patterns.
"""
for win in self.auto_refresh_panels:
if win.plotgroup.name=='Activity' or win.plotgroup.name=='ProjectionActivity' :
win.refresh()
self.update_idletasks()
def open_model_editor(self):
"""Start the Model editor."""
return ModelEditor(self)
def new_about_window(self):
win = tk.AppWindow(self)
win.withdraw()
win.title("About Topographica")
text = Label(win,text=topo.about(display=False),justify=LEFT)
text.pack(side=LEFT)
win.deiconify()
#self.messageBar.message('state', 'OK')
def open_location(self, locations):
"""
Try to open one of the specified locations in a new window of the default
browser. See webbrowser module for more information.
locations should be a tuple.
"""
# CB: could have been a list. This is only here because if locations is set
# to a string, it will loop over the characters of the string.
assert isinstance(locations,tuple),"locations must be a tuple."
for location in locations:
try:
existing_location = resolve_path(location)
webbrowser.open(existing_location,new=2,autoraise=True)
self.messageBar.response('Opened local file '+existing_location+' in browser.')
return ###
except:
pass
for location in locations:
if location.startswith('http'):
try:
webbrowser.open(location,new=2,autoraise=True)
self.messageBar.response('Opened remote location '+location+' in browser.')
return ###
except:
pass
self.messageBar.response("Could not open any of %s in a browser."%locations)
# CEBALERT: need to take care of removing old messages automatically?
# (Otherwise callers might always have to pass 'ok'.)
def status_message(self,m):
self.messageBar.response(m)
def run_simulation(self,event=None): # event=None allows use as callback
"""
Run the simulation for the duration specified in the
'run for' taggedslider.
"""
fduration = self.run_for_var.get()
self.open_progress_window(timer=topo.sim.timer)
topo.sim.run_and_time(fduration)
self.auto_refresh()
# CEBERRORALERT: Step button does strange things at time==0.
# E.g. for lissom_oo_or, nothing appears to happen. For
# hierarchical, runs to time==10.
def run_step(self):
if not topo.sim.events:
# JP: step button should be disabled if there are no events,
# but just in case...
return
# JPALERT: This should really use .run_and_time() but it doesn't support
# run(until=...)
topo.sim.run(until=topo.sim.events[0].time)
self.auto_refresh()
def set_step_button_state(self):
if topo.sim.events:
self.step_button.config(state=NORMAL)
else:
self.step_button.config(state=DISABLED)
def __get_status_bar(self,i=2):
# Hack to find appropriate status bar: Go back through frames
# until a widget with a status bar is found, and return it.
try:
while True:
f = sys._getframe(i)
if hasattr(f,'f_locals'):
if 'self' in f.f_locals:
o = f.f_locals['self']
# (temporary hack til ScrolledFrame cleaned up)
if o.__class__.__name__!='ScrolledFrame':
if hasattr(o,'messageBar'):
return o.messageBar
elif hasattr(o,'status'):
return o.status
i+=1
except:
pass
#print "GUI INTERNAL WARNING: failed to determine window on which to display message."
return self.messageBar
def open_progress_window(self,timer,title=None):
"""
Provide a convenient link to progress bars.
"""
stat = self.__get_status_bar()
return stat.open_progress_window(timer=timer,sim=topo.sim)
# CEBALERT: of course dictionary access is used as an alternative to
# the config method or whatever it's called! So this could cause
# serious confusion to someone trying to set config options using the
# dictionary style access rather than .config()! Either document
# clearly or abandon, and get() and set() to access menu entries by
# name.
class ControllableMenu(tk.Menu):
"""
A Menu, but where entries are accessible by name (using
dictionary-style access).
** Not truly compatible with Tkinter; work in progress **
"""
def __getitem__(self,name):
return self.named_commands[name]
if __name__ != '__main__':
plotpanel_classes['Connection Fields'] = ConnectionFieldsPanel
plotpanel_classes['RF Projection'] = RFProjectionPanel
plotpanel_classes['Projection'] = CFProjectionPanel
plotpanel_classes['Projection Activity'] = ProjectionActivityPanel
| bsd-3-clause |
0Steve0/Kaggle | Leaf Classfication/Leaf Classfication in Random Forest/Random Forest Benchmark.py | 1 | 1123 | #import
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
# load training data
traindata = pd.read_csv('C:/Users/sound/Desktop/Kaggle/Leaf Classfication/data/train.csv')
x_train = traindata.values[:, 2:]
y_train = traindata.values[:, 1]
#set the number of trees in random forest
num_trees = [10, 50, 100, 200, 300, 400, 500]
#calculate the cross validation scores and std
cr_val_scores = list()
cr_val_scores_std = list()
for n_tree in num_trees:
recognizer = RandomForestClassifier(n_tree)
cr_val_score = cross_val_score(recognizer, x_train, y_train)
cr_val_scores.append(np.mean(cr_val_score))
cr_val_scores_std.append(np.std(cr_val_score))
#plot cross_val_score and std
sc_array = np.array(cr_val_scores)
std_array = np.array(cr_val_scores_std)
plt.plot(num_trees, cr_val_scores)
plt.plot(num_trees, sc_array + std_array, 'b--')
plt.plot(num_trees, sc_array - std_array, 'b--')
plt.ylabel('cross_val_scores')
plt.xlabel('num_of_trees')
plt.savefig('random_forest_benchmark.png')
| apache-2.0 |
DucQuang1/BuildingMachineLearningSystemsWithPython | ch10/neighbors.py | 21 | 1787 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
import numpy as np
import mahotas as mh
from glob import glob
from features import texture, color_histogram
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from scipy.spatial import distance
basedir = '../SimpleImageDataset/'
haralicks = []
chists = []
print('Computing features...')
# Use glob to get all the images
images = glob('{}/*.jpg'.format(basedir))
# We sort the images to ensure that they are always processed in the same order
# Otherwise, this would introduce some variation just based on the random
# ordering that the filesystem uses
images.sort()
for fname in images:
imc = mh.imread(fname)
imc = imc[200:-200,200:-200]
haralicks.append(texture(mh.colors.rgb2grey(imc)))
chists.append(color_histogram(imc))
haralicks = np.array(haralicks)
chists = np.array(chists)
features = np.hstack([chists, haralicks])
print('Computing neighbors...')
sc = StandardScaler()
features = sc.fit_transform(features)
dists = distance.squareform(distance.pdist(features))
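# pdist computes the condensed pairwise Euclidean distances in the standardized
# feature space; squareform expands them into a full N x N matrix so dists[i]
# holds the distance from image i to every other image.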
print('Plotting...')
fig, axes = plt.subplots(2, 9, figsize=(16,8))
# Remove ticks from all subplots
for ax in axes.flat:
ax.set_xticks([])
ax.set_yticks([])
for ci,i in enumerate(range(0,90,10)):
left = images[i]
dists_left = dists[i]
right = dists_left.argsort()
# right[0] is the same as left[i], so pick the next closest element
right = right[1]
right = images[right]
left = mh.imread(left)
right = mh.imread(right)
axes[0, ci].imshow(left)
axes[1, ci].imshow(right)
fig.tight_layout()
fig.savefig('figure_neighbors.png', dpi=300)
| mit |
ratschlab/ASP | applications/edrt/lle_auto_k.py | 1 | 1501 | from modshogun import *
import numpy
numpy.random.seed(40)
N = 2000
tt = numpy.array((numpy.pi)*(3+2*numpy.random.rand(N)))
height = numpy.array(numpy.random.rand(N)-0.5)
X = numpy.array([tt*numpy.cos(tt), 10*height, tt*numpy.sin(tt)])
preprocs = []
lle = LocallyLinearEmbedding()
lle.set_k(9)
preprocs.append((lle, "LLE preset k"))
lle_adaptive_k = LocallyLinearEmbedding()
lle_adaptive_k.set_k(3)
lle_adaptive_k.set_max_k(40)
lle_adaptive_k.parallel.set_num_threads(1)
lle_adaptive_k.set_auto_k(True)
preprocs.append((lle_adaptive_k, "LLE auto k"))
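# Assumption (not stated in the original script): with set_auto_k(True) the
# embedding presumably searches neighbourhood sizes between set_k(3) and
# set_max_k(40) and keeps the k with the smallest reconstruction error; the
# selected value is what get_k() reports in the plot titles below.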
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
new_mpl = False
try:
swiss_roll_fig = fig.add_subplot(1,3,1, projection='3d')
new_mpl = True
except:
figure = plt.figure()
swiss_roll_fig = Axes3D(figure)
swiss_roll_fig.scatter(X[0], X[1], X[2], s=10, c=tt, cmap=plt.cm.Spectral)
plt.subplots_adjust(wspace=0.3)
plt.title('3D data')
from shogun.Features import RealFeatures
for (i, (preproc, label)) in enumerate(preprocs):
features = RealFeatures(X)
preproc.set_target_dim(2)
preproc.io.set_loglevel(MSG_DEBUG)
new_feats = preproc.embed(features).get_feature_matrix()
if not new_mpl:
preproc_subplot = fig.add_subplot(1,3,i+1)
else:
preproc_subplot = fig.add_subplot(1,3,i+2)
preproc_subplot.scatter(new_feats[0],new_feats[1], c=tt, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title(label + ' (k=%d)' % preproc.get_k())
plt.show()
| gpl-2.0 |
jorge2703/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
equialgo/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 108 | 2026 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
vondrejc/FFTHomPy | tutorials/01_trig_pol.py | 1 | 5510 | from __future__ import division, print_function
print("""
This tutorial explains the usage of trigonometric polynomials and relating
operators for the use in FFT-based homogenization.
The basic classes, which are implemented in module "homogenize.matvec",
are listed here along with their important characteristics:
Grid : contains method "Grid.get_grid_coordinates", which returns coordinates
of grid points
Tensor : this class represents a tensor-valued trigonometric polynomial and is
thus the most important part of FFT-based homogenization
DFT : this class represents matrices of Discrete Fourier Transform, which is
implemented via central version of FFT algorithm
----""")
import os
import sys
sys.path.insert(0, os.path.normpath(os.path.join(sys.path[0], '..')))
import numpy as np
from ffthompy.trigpol import Grid
from ffthompy.tensors import Tensor, DFT
print("""
The work with trigonometric polynomials is shown for""")
d = 2
N = 5*np.ones(d, dtype=np.int32)
print('dimension d =', d)
print('number of grid points N =', N)
print('which is implemented as a numpy.ndarray.')
print("""
Particularly, the vector-valued trigonometric polynomial is created as an instance 'xN' of class
'Tensor' and the random values are assigned.
""")
xN = Tensor(name='trigpol_rand', shape=(d,), N=N)
xN.randomize()
print("""
Basic properties of a trigonometric polynomial can be printed together with a norm
corresponding to the L2 norm of the trigonometric polynomial, i.e.
xN =""")
print(xN)
print("""
The values of trigonometric polynomials are stored in the attribute val of type
numpy.ndarray with shape = (self.d,) + tuple(self.N), i.e.
xN.val.shape =""")
print(xN.val.shape)
print("xN.val = xN[:] =")
print(xN.val)
print("""
In order to calculate Fourier coefficients of trigonometric polynomial,
we define DFT operators that are provided in class 'DFT'. The operation
is provided by central version of FFT algorithm and is implemented in method
'DFT.__call__' and/or 'DFT.__mul__'.
""")
FN = DFT(name='forward DFT', N=N, inverse=False)
FiN = DFT(name='inverse DFT', N=N, inverse=True)
print("FN = ")
print(FN)
print("FiN = ")
print(FiN)
print("""
The result of DFT is again the same trigonometric polynomial
with representation in Fourier domain (with Fourier coefficients);
FxN = FN*xN = FN(xN) =""")
FxN = FN*xN # Fourier coefficients of xN
print(FxN)
print("""
The forward and inverse DFT are mutually inverse operations that can
be observed by calculation of variable 'xN2':
xN2 = FiN(FxN) = FiN(FN(xN)) =""")
xN2 = FiN(FxN) # values of trigonometric polynomial at grid points
print(xN2)
print("and its comparison with initial trigonometric polynomial 'xN2'")
print("(xN == xN2) = ")
print(xN == xN2)
print("""
The norm of trigonometric polynomial calculated from Fourier
coefficients corresponds to the L^2 norm and is the same as for the values at grid
points, which is a consequence of Parseval's identity:
xN.norm() = np.linalg.norm(xN.val)/np.prod(xN.N)**0.5 =
= (np.sum(xN.val*xN.val)/np.prod(xN.N))**0.5 = """)
print(xN.norm())
print("""FxN.norm() = np.linalg.norm(FxN.val) =
= np.sum(FxN.val*np.conj(FxN.val)).real**0.5 =""")
print(FxN.norm())
print("""
The trigonometric polynomials can be also multiplied. The standard
multiplication with the '*' operator corresponds to the scalar product,
which yields the square of the norm, i.e.
FxN.norm() = xN.norm() = (xN*xN)**0.5 = (FxN*FxN)**0.5 =""")
print((xN*xN)**0.5)
print((FxN*FxN)**0.5)
print("""
The mean value of a trigonometric polynomial is calculated independently for
each component of the vector-valued trigonometric polynomial. In the real space,
it can be calculated as the mean of the trigonometric polynomial at grid points,
while in the Fourier space, it corresponds to the zero frequency placed at the
center of the grid, i.e.
xN.mean()[0] = xN[0].mean() = xN.val[0].mean() = FxN[0, 2, 2].real =""")
print(xN.mean()[0])
print(xN[0].mean())
print(xN.val[0].mean())
print(FxN[0, 2, 2].real)
print("""========================
Finally, we will plot the fundamental trigonometric polynomial, which
satisfies the Dirac-delta property at grid points and which
plays a major role in the theory of FFT-based homogenization.
phi =""")
phi = Tensor(name='phi_N,k', N=N, shape=())
phi.val[2, 2] = 1
print(phi)
print("phi.val =")
print(phi.val)
print("""
Fourier coefficients of phi
Fphi = FN*phi = FN(phi) =""")
Fphi = FN*phi
print(Fphi)
print("Fphi.val =")
print(Fphi.val)
print("""
In order to create a plot of this polynomial, it is
evaluated on a fine grid of size
M = 16*N =""")
M = 16*N
print(M)
print("phi_fine = phi.project(M) =")
phi_fine = phi.project(M)
print(phi_fine)
print("""The procedure is provided by VecTri.enlarge(M) function, which consists of
a calculation of Fourier coefficients, putting zeros to Fourier coefficients
with high frequencies, and inverse FFT that evaluates the polynomial on
a fine grid.
""")
print("""In order to plot this polynomial, we also set a size of a cell
Y =""")
Y = np.ones(d) # size of a cell
print(Y)
print(""" and evaluate the coordinates of grid points, which are stored in
numpy.ndarray of following shape:
coord.shape =""")
coord = Grid.get_coordinates(M, Y)
print(coord.shape)
if __name__ == "__main__":
print("""
Now, the plot of fundamental trigonometric polynomial is shown:""")
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(coord[0], coord[1], phi_fine.val)
plt.show()
print('END')
| mit |
simonsfoundation/CaImAn | use_cases/eLife_scripts/Figure_7-1p_striatum.py | 2 | 7274 | # -*- coding: utf-8 -*-
"""
This script reproduces the results for Figure 7, analyzing 1p microendoscopic
data using the CaImAn implementation of the CNMF-E algorithm. The algorithm
is run using both a patch-based and a non patch-based approach, and the results
are compared with those obtained from the MATLAB implementation.
More info can be found in the companion paper
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from scipy.optimize import minimize
from scipy.sparse import csc_matrix
from scipy.stats import pearsonr
from scipy.ndimage import center_of_mass
from operator import itemgetter
import h5py
import caiman as cm
from caiman.base.rois import register_ROIs
from caiman.source_extraction import cnmf
import cv2
import os
# data from https://www.dropbox.com/sh/6395g5wwlv63f0s/AACTNVivxYs7IIyeS67SdV2Qa?dl=0
base_folder = '/mnt/ceph/neuro/DataForPublications/DATA_PAPER_ELIFE/WEBSITE'
fname = os.path.join(base_folder, 'blood_vessel_10Hz.mat')
Y = h5py.File(fname)['Y'].value.astype(np.float32)
gSig = 3 # gaussian width of a 2D gaussian kernel, which approximates a neuron
gSiz = 13 # average diameter of a neuron
#%% perform memory mapping and loading
fname_new = cm.save_memmap([Y], base_name='Yr', order='C')
Yr, dims, T = cm.load_memmap(fname_new)
Y = Yr.T.reshape((T,) + dims, order='F')
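# load_memmap returns the movie as a (pixels x frames) memory-mapped array Yr
# together with the frame dimensions and frame count; transposing and an
# F-order reshape recover the (T, d1, d2) movie that is passed to cnm.fit below.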
#%% run w/o patches
dview, n_processes = None, 2
cnm = cnmf.CNMF(n_processes=n_processes, method_init='corr_pnr', k=None, dview=dview,
gSig=(gSig, gSig), gSiz=(gSiz, gSiz), merge_thresh=.65, p=1, tsub=1, ssub=1,
only_init_patch=True, gnb=0, min_corr=.7, min_pnr=7, normalize_init=False,
ring_size_factor=1.4, center_psf=True, ssub_B=2, init_iter=1, s_min=-10)
cnm.fit(Y)
#%% run w/ patches
c, dview, n_processes = cm.cluster.setup_cluster(
backend='local', n_processes=None, single_thread=False)
cnmP = cnmf.CNMF(n_processes=n_processes, method_init='corr_pnr', k=None, dview=dview,
gSig=(gSig, gSig), gSiz=(gSiz, gSiz), merge_thresh=.65, p=1, tsub=1, ssub=1,
only_init_patch=True, gnb=0, min_corr=.7, min_pnr=7, normalize_init=False,
ring_size_factor=1.4, center_psf=True, ssub_B=2, init_iter=1, s_min=-10,
nb_patch=0, del_duplicates=True, rf=(64, 64), stride=(32, 32))
cnmP.fit(Y)
#%% DISCARD LOW QUALITY COMPONENT
def discard(cnm, final_frate=10,
r_values_min=0.1, # threshold on space consistency
fitness_min=-20, # threshold on time variability
# threshold on time variability (if nonsparse activity)
fitness_delta_min=-30,
Npeaks=10):
traces = cnm.estimates.C + cnm.estimates.YrA
idx_components, idx_components_bad = cm.components_evaluation.estimate_components_quality(
traces, Yr, cnm.estimates.A, cnm.estimates.C, cnm.estimates.b, cnm.estimates.f, final_frate=final_frate, Npeaks=Npeaks,
r_values_min=r_values_min, fitness_min=fitness_min, fitness_delta_min=fitness_delta_min)
print(('Keeping ' + str(len(idx_components)) +
' and discarding ' + str(len(idx_components_bad))))
A_ = cnm.estimates.A[:, idx_components]
C_ = cnm.estimates.C[idx_components]
return A_, C_, traces[idx_components]
A_, C_, traces = discard(cnm)
A_P, C_P, tracesP = discard(cnmP)
# DISCARD TOO SMALL COMPONENT
notsmall = np.sum(A_.toarray() > 0, 0) >= 125
A_ = A_[:, notsmall]
C_ = C_[notsmall]
notsmall = np.sum(A_P.toarray() > 0, 0) >= 125
A_P = A_P[:, notsmall]
C_P = C_P[notsmall]
# DISCARD TOO ECCENTRIC COMPONENT
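# The helpers below treat each spatial footprint as a 2D mass distribution: the
# normalized second-order image moments form a 2x2 covariance matrix, and the
# square root of the ratio of its eigenvalues gives the footprint's aspect
# ratio, so large values flag elongated, vessel-like shapes for removal.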
def aspect_ratio(img):
M = cv2.moments(img)
cov = np.array([[M['mu20'], M['mu11']], [M['mu11'], M['mu02']]]) / M['m00']
EV = np.sort(np.linalg.eigh(cov)[0])
return np.sqrt(EV[1] / EV[0])
def keep_ecc(A, thresh=3):
ar = np.array([aspect_ratio(a.reshape(dims)) for a in A.T])
notecc = ar < thresh
centers = np.array([center_of_mass(a.reshape(dims)) for a in A.T])
border = np.array([c.min() < 3 or (np.array(dims) - c).min() < 3 for c in centers])
return notecc | border
keep = keep_ecc(A_.toarray())
A_ = A_[:, keep]
C_ = C_[keep]
keep = keep_ecc(A_P.toarray())
A_P = A_P[:, keep]
C_P = C_P[keep]
#%% load matlab results and match ROIs
A, C_raw, C = itemgetter('A', 'C_raw', 'C')(loadmat(
os.path.join(base_folder, 'results_bk.mat')))
A_ = csc_matrix(A_.toarray().reshape(
dims + (-1,), order='C').reshape((-1, A_.shape[-1]), order='F'))
A_P = csc_matrix(A_P.toarray().reshape(
dims + (-1,), order='C').reshape((-1, A_P.shape[-1]), order='F'))
ids = [616, 524, 452, 305, 256, 573, 181, 574, 575, 619]
def match(a, c):
matched_ROIs1, matched_ROIs2, non_matched1, non_matched2, performance, A2 = register_ROIs(
A, a, dims, align_flag=False, thresh_cost=.7)
cor = [pearsonr(c1, c2)[0] for (c1, c2) in
np.transpose([C[matched_ROIs1], c[matched_ROIs2]], (1, 0, 2))]
print(np.mean(cor), np.median(cor))
return matched_ROIs2[[list(matched_ROIs1).index(i) for i in ids]]
ids_ = match(A_, C_)
# {'f1_score': 0.9033778476040848, 'recall': 0.8468335787923417, 'precision': 0.968013468013468, 'accuracy': 0.8237822349570201}
# (0.7831526916134324, 0.8651673117308342)
ids_P = match(A_P, C_P)
# {'f1_score': 0.8979591836734694, 'recall': 0.8424153166421208, 'precision': 0.9613445378151261, 'accuracy': 0.8148148148148148}
# (0.7854095007286398, 0.870879114022712)
#%% plot ROIs and traces
cn_filter, pnr = cm.summary_images.correlation_pnr(
Y, gSig=gSig, center_psf=True, swap_dim=False)
fig = plt.figure(figsize=(30, 10))
fig.add_axes([0, 0, .33, 1])
plt.rc('lines', lw=1.2)
cm.utils.visualization.plot_contours(
A, cn_filter.T, thr=.6, vmax=0.95, colors='w', display_numbers=False)
cm.utils.visualization.plot_contours(
A_P[:, np.array([i not in ids_P for i in range(A_P.shape[1])])], cn_filter,
thr=.6, vmax=0.95, colors='r', display_numbers=False)
plt.rc('lines', lw=3.5)
for k, i in enumerate(ids_P):
cm.utils.visualization.plot_contours(
A_P[:, i], cn_filter.T, thr=.7, vmax=0.95, colors='C%d' % k, display_numbers=False)
plt.rc('lines', lw=1.5)
plt.plot([1, 2], zorder=-100, lw=3, c='w', label='Zhou et al.')
plt.plot([1, 2], zorder=-100, lw=3, c='r', label='patches')
lg = plt.legend(loc=(.44, .9), frameon=False, fontsize=20)
for text in lg.get_texts():
text.set_color('w')
plt.axis('off')
plt.xticks([])
plt.yticks([])
fig.add_axes([.33, 0, .67, 1])
for i, n in enumerate(ids):
plt.plot(.9 * C[n] / C[n].max() + (9 - i), c='k', lw=5.5, label='Zhou et al.')
a, b = minimize(lambda x:
np.sum((x[0] + x[1] * C_[ids_[i]] - C[n] / C[n].max())**2), (0, 1e-3)).x
plt.plot(.9 * (a + b * C_[ids_[i]]) + (9 - i), lw=4, c='cyan', label='no patches')
a, b = minimize(lambda x:
np.sum((x[0] + x[1] * C_P[ids_P[i]] - C[n] / C[n].max())**2), (0, 1e-3)).x
plt.plot(.9 * (a + b * C_P[ids_P[i]]) + (9 - i), lw=2.5, c='r', label='patches')
if i == 0:
plt.legend(ncol=3, loc=(.3, .96), frameon=False, fontsize=20, columnspacing=4)
plt.scatter([-100], [.45 + (9 - i)], c='C%d' % i, s=80)
plt.ylim(0, 10)
plt.axis('off')
plt.xticks([])
plt.yticks([])
plt.xlim(-350, 6050)
plt.ylim(-.1, 10.2)
plt.show() | gpl-2.0 |
Dispersive-Hydrodynamics-Lab/PACE | PACE/PACE.py | 1 | 19319 | #!/usr/bin/env python3.5
"""
PACE
TODO:
* model training/testing
* more models (technically)
* multithreading
"""
import sys
import os
import argparse
import hashlib
import typing
from enforce import runtime_validation as types
from tqdm import tqdm
import numpy as np
import numpy.linalg as linalg
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import scipy.integrate as si
import scipy.io as sco
import sklearn as sk
from sklearn import svm
from sklearn import preprocessing
from sklearn import neighbors
DATASTORE = 'linefitdata.mat'
HEADER = (' ____ _ ____ _____\n'
'| _ \ / \ / ___| ____|\n'
'| |_) / _ \| | | _|\n'
'| __/ ___ \ |___| |___\n'
'|_| /_/ \_\____|_____|\n\n'
'PACE: Parameterization & Analysis of Conduit Edges\n'
'William Farmer - 2015\n')
def main():
args = get_args()
data = DataStore(DATASTORE)
data.load()
# Establish directory for img outputs
if not os.path.exists('./img'):
os.makedirs('./img')
if args.plot:
for filename in args.files:
print('Plotting ' + filename)
plot_name = './img/' + filename + '.general_fit.png'
fit = LineFit(filename)
fit.plot_file(name=plot_name, time=args.time)
if args.analyze:
for filename in args.files:
manage_file_analysis(args, filename, data)
if args.plotdata:
data.plot_traindata()
if args.machinetest:
        learner = ML(args, algo=args.model)
if args.printdata:
data.printdata()
if args.printdatashort:
data.printshort()
@types
def manage_file_analysis(args: argparse.Namespace, filename: str, data: object) -> None:
"""
Take care of the analysis of a datafile
"""
key = DataStore.hashfile(filename)
print('Analyzing {} --> {}'.format(filename, key))
if data.check_key(key): # if exists in database, prepopulate
fit = LineFit(filename, data=data.get_data(key))
else:
fit = LineFit(filename)
if args.time:
noise, curvature, rnge, domn = fit.analyze(time=args.time)
newrow = [args.time, noise, curvature,
rnge, domn, fit.accepts[args.time]]
data.update1(key, newrow, len(fit.noises))
else:
fit.analyze_full()
newrows = np.array([range(len(fit.noises)), fit.noises,
fit.curves, fit.ranges, fit.domains, fit.accepts])
data.update(key, newrows)
data.save()
class DataStore(object):
def __init__(self, name: str):
"""
Uses a .mat as datastore for compatibility.
Eventually may want to switch to SQLite, or some database? Not sure if
ever needed. This class provides that extensible API structure however.
Datafile has the following structure:
learning_data = {filehash:[[trial_index, noise, curvature,
range, domain, accept, viscosity]
,...],...}
Conveniently, you can use the domain field as a check as to whether or
not the row has been touched. If domain=0 (for that row) then that
means that it hasn't been updated.
:param: name of datastore
"""
self.name = name
self.data = {}
def load(self) -> None:
"""
Load datafile
"""
try:
self.data = sco.loadmat(self.name)
except FileNotFoundError:
pass
def save(self) -> None:
"""
Save datafile to disk
"""
sco.savemat(self.name, self.data)
def get_data(self, key: str) -> np.ndarray:
"""
Returns the specified data. Warning, ZERO ERROR HANDLING
        :param key: SHA512 hash key identifying the file
:return: 2d data array
"""
return self.data[key]
@types
def get_keys(self) -> typing.List[str]:
"""
Return list of SHA512 hash keys that exist in datafile
:return: list of keys
"""
keys = []
for key in self.data.keys():
if key not in ['__header__', '__version__', '__globals__']:
keys.append(key)
return keys
@types
def check_key(self, key: str) -> bool:
"""
Checks if key exists in datastore. True if yes, False if no.
:param: SHA512 hash key
:return: whether or key not exists in datastore
"""
keys = self.get_keys()
return key in keys
def get_traindata(self) -> np.ndarray:
"""
Pulls all available data and concatenates for model training
:return: 2d array of points
"""
traindata = None
for key, value in self.data.items():
if key not in ['__header__', '__version__', '__globals__']:
if traindata is None:
traindata = value[np.where(value[:, 4] != 0)]
else:
traindata = np.concatenate((traindata, value[np.where(value[:, 4] != 0)]))
return traindata
@types
def plot_traindata(self, name: str='dataplot') -> None:
"""
Plots traindata.... choo choo...
"""
traindata = self.get_traindata()
plt.figure(figsize=(16, 16))
plt.scatter(traindata[:, 1], traindata[:, 2],
c=traindata[:, 5], marker='o', label='Datastore Points')
plt.xlabel(r'$\log_{10}$ Noise')
plt.ylabel(r'$\log_{10}$ Curvature')
plt.legend(loc=2, fontsize='xx-large')
plt.savefig('./img/{}.png'.format(name))
def printdata(self) -> None:
""" Prints data to stdout """
np.set_printoptions(threshold=np.nan)
print(self.data)
np.set_printoptions(threshold=1000)
def printshort(self) -> None:
""" Print shortened version of data to stdout"""
print(self.data)
@types
def update(self, key: str, data: np.ndarray) -> None:
""" Update entry in datastore """
self.data[key] = data
def update1(self, key: str, data: np.ndarray, size: int) -> None:
""" Update one entry in specific record in datastore """
print(data)
if key in self.get_keys():
self.data[key][data[0]] = data
else:
newdata = np.zeros((size, 6))
newdata[data[0]] = data
self.data[key] = newdata
@staticmethod
@types
def hashfile(name: str) -> str:
"""
Gets a hash of a file using block parsing
http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file
Using SHA512 for long-term support (hehehehe)
"""
hasher = hashlib.sha512()
with open(name, 'rb') as openfile:
for chunk in iter(lambda: openfile.read(4096), b''):
hasher.update(chunk)
return hasher.hexdigest()
class LineFit(object):
def __init__(self, filename: str, data: np.ndarray=None,
function_number: int=16, spread_number: int=22):
"""
Main class for line fitting and parameter determination
:param: filename
:param: data for fitting
:param: number of functions
:param: gaussian spread number
"""
self.filename = filename
(self.averagedata, self.times,
self.accepts, self.ratio, self.viscosity) = self._loadedges()
self.domain = np.arange(len(self.averagedata[:, 0]))
self.function_number = function_number
self.spread_number = spread_number
if data is None:
self.noises = np.zeros(len(self.times))
self.curves = np.zeros(len(self.times))
self.ranges = np.zeros(len(self.times))
self.domains = np.zeros(len(self.times))
else:
self.noises = data[:, 1]
self.curves = data[:, 2]
self.ranges = data[:, 3]
self.domains = data[:, 4]
@types
def _loadedges(self) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, float, np.ndarray]:
"""
Attempts to intelligently load the .mat file and take average of left and right edges
:return: left and right averages
:return: times for each column
:return: accept/reject for each column
:return: pixel-inch ratio
"""
data = sco.loadmat(self.filename)
datakeys = [k for k in data.keys()
if ('right' in k) or ('left' in k) or ('edge' in k)]
averagedata = ((data[datakeys[0]] + data[datakeys[1]]) / 2)
try:
times = (data['times'] - data['times'].min())[0]
except KeyError:
times = np.arange(len(data[datakeys[0]][0]))
try:
accept = data['accept']
except KeyError:
accept = np.zeros(len(times))
try:
ratio = data['ratio']
except KeyError:
ratio = 1
try:
viscosity = data['viscosity']
except KeyError:
viscosity = np.ones(len(times))
return averagedata, times, accept, ratio, viscosity
def plot_file(self, name: str=None, time: int=None) -> None:
"""
Plot specific time for provided datafile.
If no time provided, will plot middle.
:param: savefile name
:param: time/data column
"""
if not time:
time = int(len(self.times) / 2)
if not name:
name = './img/' + self.filename + '.png'
yhat, residuals, residual_mean, noise = self._get_fit(time)
plt.figure()
plt.scatter(self.domain, self.averagedata[:, time], alpha=0.2)
plt.plot(yhat)
plt.savefig(name)
@staticmethod
@types
def ddiff(arr: np.ndarray) -> np.ndarray:
"""
Helper Function: Divided Differences
input: array
"""
return arr[:-1] - arr[1:]
@types
def _gaussian_function(self, datalength: int, values: np.ndarray,
height: int, index: int) -> np.ndarray:
"""
i'th Regression Model Gaussian
:param: len(x)
:param: x values
:param: height of gaussian
:param: position of gaussian
:return: gaussian bumps over domain
"""
return height * np.exp(-(1 / (self.spread_number * datalength)) *
(values - ((datalength / self.function_number) * index)) ** 2)
@types
def _get_fit(self, time: int) -> typing.Tuple[np.ndarray, np.ndarray, float, float]:
"""
Fit regression model to data
:param: time (column of data)
:return: predicted points
:return: residuals
:return: mean residual
:return: error
"""
rawdata = self.averagedata[:, time]
domain = np.arange(len(rawdata))
datalength = len(domain)
coefficients = np.zeros((datalength, self.function_number + 2))
coefficients[:, 0] = 1
coefficients[:, 1] = domain
for i in range(self.function_number):
coefficients[:, 2 + i] = self._gaussian_function(datalength, domain, 1, i)
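        # Ordinary least squares on the design matrix C = [1, x, g_0(x), ..., g_{F-1}(x)]:
        # betas = (C^T C)^{-1} C^T y, i.e. the normal equations solved by direct inversion.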
betas = linalg.inv(coefficients.transpose().dot(coefficients)).dot(coefficients.transpose().dot(rawdata))
predicted_values = coefficients.dot(betas)
residuals = rawdata - predicted_values
error = np.sqrt(residuals.transpose().dot(residuals) / (datalength - (self.function_number + 2)))
return predicted_values, residuals, residuals.mean(), error
@types
def _get_noise(self, residuals: np.ndarray) -> float:
"""
Determine Noise of Residuals.
:param: residuals
:return: noise
"""
return np.mean(np.abs(residuals))
@types
    def analyze(self, time: int=None) -> typing.Tuple[float, float, float, float]:
"""
Determine noise, curvature, range, and domain of specified array.
:param: pixel to inch ratio
:param: time (column) to use.
:return: curvature
:return: noise
:return: range
:return: domain
"""
if not time:
time = int(len(self.times) / 2)
if self.domains[time] == 0:
yhat, residuals, mean_residual, error = self._get_fit(time)
yhat_p = self.ddiff(yhat)
yhat_pp = self.ddiff(yhat_p)
noise = self._get_noise(residuals)
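            # Curvature metric: Simpson's-rule integral of the squared second divided
            # differences of the fit, square-rooted, then normalized by the number of
            # points and the pixel-to-inch ratio.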
curvature = (1 / self.ratio) * (1 / len(yhat_pp)) * np.sqrt(si.simps(yhat_pp ** 2))
rng = (self.ratio * (np.max(self.averagedata[:, time]) -
np.min(self.averagedata[:, time])))
dmn = self.ratio * len(self.averagedata[:, time])
self.noises[time] = np.log10(noise)
self.curves[time] = np.log10(curvature)
self.ranges[time] = np.log10(rng)
self.domains[time] = np.log10(dmn)
return self.noises[time], self.curves[time], self.ranges[time], self.domains[time]
@types
def analyze_full(self) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Determine noise, curvature, range, and domain of specified data.
Like analyze, except examines the entire file.
:param: float->pixel to inch ratio
:return: array->curvatures
:return: array->noises
:return: array->ranges
:return: array->domains
"""
if self.noises[0] == 0:
timelength = len(self.times)
for i in tqdm(range(timelength)):
self.analyze(time=i)
return self.noises, self.curves, self.ranges, self.domains
class ML(object):
def __init__(self, args: argparse.Namespace, algo: str='nn'):
"""
Machine Learning to determine usability of data....
"""
self.algo = self.get_algo(args, algo)
def get_algo(self, args: argparse.Namespace, algo: str) -> object:
""" Returns machine learning algorithm based on arguments """
if algo == 'nn':
return NearestNeighbor(args.nnk)
def train(self) -> None:
""" Trains specified algorithm """
traindata = self.get_data()
self.algo.train(traindata)
def get_data(self) -> np.ndarray:
"""
Gets data for training
We use the domain column to determine what fields have been filled out
        If the domain is zero (i.e. the row has not been analyzed) then we should ignore it anyway
        """
        # Pull training rows from the on-disk datastore.
        datastore = DataStore(DATASTORE)
        datastore.load()
        traindata = datastore.get_traindata()
return traindata
def plot_fitspace(self, name: str, X: np.ndarray, y: np.ndarray, clf: object) -> None:
""" Plot 2dplane of fitspace """
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
h = 0.01 # Mesh step size
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xlabel(r'$\log_{10}$ Noise')
plt.ylabel(r'$\log_{10}$ Curvature')
plt.savefig(name)
class NearestNeighbor(object):
def __init__(self, k: int):
"""
An example machine learning model. EVERY MODEL NEEDS TO PROVIDE:
1. Train
2. Predict
"""
self.clf = neighbors.KNeighborsClassifier(k, weights='distance',
p=2, algorithm='auto',
n_jobs=8)
def train(self, traindata: np.ndarray) -> None:
""" Trains on dataset """
self.clf.fit(traindata[:, 1:5], traindata[:, 5])
def predict(self, predictdata: np.ndarray) -> np.ndarray:
""" predict given points """
return self.clf.predict(predictdata)
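# The model below is an illustrative sketch, not part of the original module:
# the --model flag advertises an 'svm' option, so this shows how a second
# learner could follow the same train/predict interface as NearestNeighbor.
# It reuses the `from sklearn import svm` import at the top of the file; the
# RBF kernel and its parameters are assumptions, not tuned values.
class SVMModel(object):
    def __init__(self, c: float=1.0, gamma: str='auto'):
        """ Support vector classifier exposing the same interface as NearestNeighbor """
        self.clf = svm.SVC(C=c, gamma=gamma, kernel='rbf')

    def train(self, traindata: np.ndarray) -> None:
        """ Trains on dataset (columns 1-4 are features, column 5 is the accept flag) """
        self.clf.fit(traindata[:, 1:5], traindata[:, 5])

    def predict(self, predictdata: np.ndarray) -> np.ndarray:
        """ Predict accept/reject labels for the given feature rows """
        return self.clf.predict(predictdata)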
def get_args() -> argparse.Namespace:
"""
Get program arguments.
Just use --help....
"""
parser = argparse.ArgumentParser(prog='python3 linefit.py',
description=('Parameterize and analyze '
'usability of conduit edge data'))
parser.add_argument('files', metavar='F', type=str, nargs='*',
help=('File(s) for processing. '
'Each file has a specific format: '
'See README (or header) for specification.'))
parser.add_argument('-p', '--plot', action='store_true', default=False,
help=('Create Plot of file(s)? Note, unless --time flag used, '
'will plot middle time.'))
parser.add_argument('-pd', '--plotdata', action='store_true', default=False,
help='Create plot of current datastore.')
parser.add_argument('-a', '--analyze', action='store_true', default=False,
help=('Analyze the file and determine Curvature/Noise parameters. '
'If --time not specified, will examine entire file. '
'This will add results to datastore with false flags '
'in accept field if not provided.'))
parser.add_argument('-mt', '--machinetest', action='store_true', default=False,
help=('Determine if the times from the file are usable based on '
'supervised learning model. If --time not specified, '
'will examine entire file.'))
parser.add_argument('-m', '--model', type=str, default='nn',
help=('Learning Model to use. Options are ["nn", "svm", "forest", "sgd"]'))
parser.add_argument('-nnk', '--nnk', type=int, default=10,
help=('k-Parameter for k nearest neighbors. Google it.'))
parser.add_argument('-t', '--time', type=int, default=None,
help=('Time (column) of data to use for analysis OR plotting. '
'Zero-Indexed'))
parser.add_argument('-d', '--datastore', type=str, default=DATASTORE,
help=("Datastore filename override. "
"Don't do this unless you know what you're doing"))
parser.add_argument('-pds', '--printdata', action='store_true', default=False,
help=("Print data"))
parser.add_argument('-pdss', '--printdatashort', action='store_true', default=False,
help=("Print data short"))
args = parser.parse_args()
return args
if __name__ == '__main__':
sys.exit(main())
| lgpl-3.0 |
testalt/electrum-NMC | plugins/plot.py | 1 | 4554 | from PyQt4.QtGui import *
from electrum.plugins import BasePlugin, hook
from electrum.i18n import _
import datetime
from electrum.util import format_satoshis
try:
import matplotlib.pyplot as plt
import matplotlib.dates as md
from matplotlib.patches import Ellipse
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
flag_matlib=True
except:
flag_matlib=False
class Plugin(BasePlugin):
def fullname(self):
return 'Plot History'
def description(self):
return '%s\n%s' % (_("Ability to plot transaction history in graphical mode."), _("Warning: Requires matplotlib library."))
def is_available(self):
if flag_matlib:
return True
else:
return False
def is_enabled(self):
if not self.is_available():
return False
else:
return True
@hook
def init_qt(self, gui):
self.win = gui.main_window
@hook
def export_history_dialog(self, d,hbox):
self.wallet = d.wallet
history = self.wallet.get_tx_history()
if len(history) > 0:
b = QPushButton(_("Preview plot"))
hbox.addWidget(b)
b.clicked.connect(lambda: self.do_plot(self.wallet))
else:
b = QPushButton(_("No history to plot"))
hbox.addWidget(b)
def do_plot(self,wallet):
history = wallet.get_tx_history()
balance_Val=[]
fee_val=[]
value_val=[]
datenums=[]
unknown_trans=0
pending_trans=0
counter_trans=0
for item in history:
tx_hash, confirmations, is_mine, value, fee, balance, timestamp = item
if confirmations:
if timestamp is not None:
try:
datenums.append(md.date2num(datetime.datetime.fromtimestamp(timestamp)))
balance_string = format_satoshis(balance, False)
balance_Val.append(float((format_satoshis(balance,False)))*1000.0)
                    except (RuntimeError, TypeError, NameError) as reason:
unknown_trans=unknown_trans+1
pass
else:
unknown_trans=unknown_trans+1
else:
pending_trans=pending_trans+1
if value is not None:
value_string = format_satoshis(value, True)
value_val.append(float(value_string)*1000.0)
else:
value_string = '--'
if fee is not None:
fee_string = format_satoshis(fee, True)
fee_val.append(float(fee_string))
else:
fee_string = '0'
if tx_hash:
label, is_default_label = wallet.get_label(tx_hash)
label = label.encode('utf-8')
else:
label = ""
f, axarr = plt.subplots(2, sharex=True)
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
x=19
test11="Unknown transactions = "+str(unknown_trans)+" Pending transactions = "+str(pending_trans)+" ."
box1 = TextArea(" Test : Number of pending transactions", textprops=dict(color="k"))
box1.set_text(test11)
box = HPacker(children=[box1],
align="center",
pad=0.1, sep=15)
anchored_box = AnchoredOffsetbox(loc=3,
child=box, pad=0.5,
frameon=True,
bbox_to_anchor=(0.5, 1.02),
bbox_transform=ax.transAxes,
borderpad=0.5,
)
ax.add_artist(anchored_box)
plt.ylabel('mBTC')
plt.xlabel('Dates')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[0].plot(datenums,balance_Val,marker='o',linestyle='-',color='blue',label='Balance')
axarr[0].legend(loc='upper left')
axarr[0].set_title('History Transactions')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[1].plot(datenums,fee_val,marker='o',linestyle='-',color='red',label='Fee')
axarr[1].plot(datenums,value_val,marker='o',linestyle='-',color='green',label='Value')
axarr[1].legend(loc='upper left')
# plt.annotate('unknown transaction = %d \n pending transactions = %d' %(unknown_trans,pending_trans),xy=(0.7,0.05),xycoords='axes fraction',size=12)
plt.show()
| gpl-3.0 |
MartinThoma/algorithms | ML/50-mlps/07-autokeras/hasy_tools.py | 12 | 46221 | #!/usr/bin/env python
"""
Tools for the HASY dataset.
Type `./hasy_tools.py --help` for the command line tools and `help(hasy_tools)`
in the interactive Python shell for the module options of hasy_tools.
See https://arxiv.org/abs/1701.08380 for details about the dataset.
"""
import csv
import json
import logging
import os
import random
random.seed(0) # make sure results are reproducible
import hashlib
import sys
import numpy as np
from PIL import Image, ImageDraw
from six.moves import urllib
from sklearn.model_selection import train_test_split
np.random.seed(0) # make sure results are reproducible
import matplotlib.pyplot as plt
import scipy.misc  # scipy.misc.imshow is used by the analysis helpers below (needs an older SciPy)
import scipy.ndimage
try:
from urllib.request import urlretrieve # Python 3
except ImportError:
from urllib import urlretrieve # Python 2
import shutil
import tarfile
from six.moves import cPickle as pickle
from six.moves.urllib.error import HTTPError, URLError
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
__version__ = "v2.4"
n_classes = 369
labels = []
WIDTH = 32
HEIGHT = 32
img_rows = 32
img_cols = 32
img_channels = 1
symbol_id2index = None
def _load_csv(filepath, delimiter=',', quotechar="'"):
"""
Load a CSV file.
Parameters
----------
filepath : str
Path to a CSV file
delimiter : str, optional
quotechar : str, optional
Returns
-------
list of dicts : Each line of the CSV file is one element of the list.
"""
data = []
csv_dir = os.path.dirname(filepath)
with open(filepath) as csvfile:
reader = csv.DictReader(csvfile,
delimiter=delimiter,
quotechar=quotechar)
for row in reader:
for el in ['path', 'path1', 'path2']:
if el in row:
row[el] = os.path.abspath(os.path.join(csv_dir, row[el]))
data.append(row)
return data
def generate_index(csv_filepath):
"""
Generate an index 0...k for the k labels.
Parameters
----------
csv_filepath : str
Path to 'test.csv' or 'train.csv'
Returns
-------
tuple of dict and a list
dict : Maps a symbol_id as in test.csv and
train.csv to an integer in 0...k, where k is the total
number of unique labels.
list : LaTeX labels
"""
symbol_id2index = {}
data = _load_csv(csv_filepath)
i = 0
labels = []
for item in data:
if item['symbol_id'] not in symbol_id2index:
symbol_id2index[item['symbol_id']] = i
labels.append(item['latex'])
i += 1
    return symbol_id2index, labels
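# A small usage sketch (assumes a local HASYv2 extraction with the usual CSV
# layout; the path below is hypothetical):
#     symbol_id2index, latex_labels = generate_index(
#         'HASYv2/classification-task/fold-1/train.csv')
#     # symbol_id2index maps a symbol_id string to 0..k-1; latex_labels[i] is
#     # the LaTeX command for index i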
def _validate_file(fpath, md5_hash):
"""
Validate a file against a MD5 hash.
Parameters
----------
fpath: string
Path to the file being validated
md5_hash: string
The MD5 hash being validated against
Returns
---------
bool
True, if the file is valid. Otherwise False.
"""
hasher = hashlib.md5()
with open(fpath, 'rb') as f:
buf = f.read()
hasher.update(buf)
if str(hasher.hexdigest()) == str(md5_hash):
return True
else:
return False
def _get_file(fname, origin, md5_hash=None, cache_subdir='~/.datasets'):
"""
Download a file from a URL if it not already in the cache.
Passing the MD5 hash will verify the file after download
as well as if it is already present in the cache.
Parameters
----------
fname: name of the file
origin: original URL of the file
md5_hash: MD5 hash of the file for verification
cache_subdir: directory being used as the cache
Returns
-------
Path to the downloaded file
"""
datadir_base = os.path.expanduser("~/.datasets")
if not os.path.exists(datadir_base):
os.makedirs(datadir_base)
if not os.access(datadir_base, os.W_OK):
logging.warning(f"Could not access {cache_subdir}.")
datadir_base = os.path.join('/tmp', '.data')
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if md5_hash is not None:
if not _validate_file(fpath, md5_hash):
print('A local file was found, but it seems to be '
'incomplete or outdated.')
download = True
else:
download = True
if download:
print(f'Downloading data from {origin} to {fpath}')
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath)
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
return fpath
def load_data(mode='fold-1', image_dim_ordering='tf'):
"""
Load HASYv2 dataset.
Parameters
----------
    mode : string, optional (default: "fold-1")
- "complete" : Returns {'x': x, 'y': y} with all labeled data
- "fold-1": Returns {'x_train': x_train,
'y_train': y_train,
'x_test': x_test,
'y_test': y_test}
- "fold-2", ..., "fold-10": See "fold-1"
- "verification": Returns {'train': {'x_train': List of loaded images,
'y_train': list of labels},
'test-v1': {'X1s': List of first images,
'X2s': List of second images,
'ys': List of labels
'True' or 'False'}
'test-v2': {'X1s': List of first images,
'X2s': List of second images,
'ys': List of labels
'True' or 'False'}
'test-v3': {'X1s': List of first images,
'X2s': List of second images,
'ys': List of labels
'True' or 'False'}}
image_dim_ordering : 'th' for theano or 'tf' for tensorflow (default: 'tf')
Returns
-------
dict
See "mode" parameter for details.
All 'x..' keys contain a uint8 numpy array [index, y, x, depth] (or
        [index, depth, y, x] for image_dim_ordering='th')
All 'y..' keys contain a 2D uint8 numpy array [[label]]
"""
# Download if not already done
fname = 'HASYv2.tar.bz2'
origin = 'https://zenodo.org/record/259444/files/HASYv2.tar.bz2'
fpath = _get_file(fname, origin=origin,
md5_hash='fddf23f36e24b5236f6b3a0880c778e3',
cache_subdir='HASYv2')
path = os.path.dirname(fpath)
# Extract content if not already done
untar_fpath = os.path.join(path, "HASYv2")
if not os.path.exists(untar_fpath):
print('Extract contents from archive...')
tfile = tarfile.open(fpath, 'r:bz2')
try:
tfile.extractall(path=untar_fpath)
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(untar_fpath):
if os.path.isfile(untar_fpath):
os.remove(untar_fpath)
else:
shutil.rmtree(untar_fpath)
raise
tfile.close()
# Create pickle if not already done
pickle_fpath = os.path.join(untar_fpath, "hasy-data.pickle")
if not os.path.exists(pickle_fpath):
# Load mapping from symbol names to indices
symbol_csv_fpath = os.path.join(untar_fpath, "symbols.csv")
symbol_id2index, labels = generate_index(symbol_csv_fpath)
globals()["labels"] = labels
globals()["symbol_id2index"] = symbol_id2index
# Load data
data_csv_fpath = os.path.join(untar_fpath, "hasy-data-labels.csv")
data_csv = _load_csv(data_csv_fpath)
x_compl = np.zeros((len(data_csv), 1, WIDTH, HEIGHT), dtype=np.uint8)
y_compl = []
s_compl = []
path2index = {}
# Load HASYv2 data
for i, data_item in enumerate(data_csv):
fname = os.path.join(untar_fpath, data_item['path'])
s_compl.append(fname)
x_compl[i, 0, :, :] = scipy.ndimage.imread(fname,
flatten=False,
mode='L')
label = symbol_id2index[data_item['symbol_id']]
y_compl.append(label)
path2index[fname] = i
y_compl = np.array(y_compl, dtype=np.int64)
data = {'x': x_compl,
'y': y_compl,
's': s_compl,
'labels': labels,
'path2index': path2index}
# Store data as pickle to speed up later calls
with open(pickle_fpath, 'wb') as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
with open(pickle_fpath, 'rb') as f:
data = pickle.load(f)
globals()["labels"] = data['labels']
labels = data['labels']
x_compl = data['x']
y_compl = np.reshape(data['y'], (len(data['y']), 1))
s_compl = data['s']
path2index = data['path2index']
if image_dim_ordering == 'tf':
x_compl = x_compl.transpose(0, 2, 3, 1)
if mode == 'complete':
return {'x': x_compl, 'y': y_compl}
elif mode.startswith('fold-'):
fold = int(mode.split("-")[1])
if fold < 1 or fold > 10:
raise NotImplementedError
# Load fold
fold_dir = os.path.join(untar_fpath,
f"classification-task/fold-{fold}")
train_csv_fpath = os.path.join(fold_dir, "train.csv")
test_csv_fpath = os.path.join(fold_dir, "test.csv")
train_csv = _load_csv(train_csv_fpath)
test_csv = _load_csv(test_csv_fpath)
train_ids = np.array([path2index[row['path']] for row in train_csv])
test_ids = np.array([path2index[row['path']] for row in test_csv])
x_train = x_compl[train_ids]
x_test = x_compl[test_ids]
y_train = y_compl[train_ids]
y_test = y_compl[test_ids]
s_train = [s_compl[id_] for id_ in train_ids]
s_test = [s_compl[id_] for id_ in test_ids]
data = {'x_train': x_train,
'y_train': y_train,
'x_test': x_test,
'y_test': y_test,
's_train': s_train,
's_test': s_test,
'labels': labels
}
return data
elif mode == 'verification':
# Load the data
symbol_id2index = globals()["symbol_id2index"]
base_ = os.path.join(untar_fpath, "verification-task")
# Load train data
train_csv_fpath = os.path.join(base_, "train.csv")
train_csv = _load_csv(train_csv_fpath)
train_ids = np.array([path2index[row['path']] for row in train_csv])
x_train = x_compl[train_ids]
y_train = y_compl[train_ids]
s_train = [s_compl[id_] for id_ in train_ids]
# Load test data
test1_csv_fpath = os.path.join(base_, 'test-v1.csv')
test2_csv_fpath = os.path.join(base_, 'test-v2.csv')
test3_csv_fpath = os.path.join(base_, 'test-v3.csv')
tmp1 = _load_images_verification_test(test1_csv_fpath,
x_compl,
path2index)
tmp2 = _load_images_verification_test(test2_csv_fpath,
x_compl,
path2index)
tmp3 = _load_images_verification_test(test3_csv_fpath,
x_compl,
path2index)
data = {'train': {'x_train': x_train,
'y_train': y_train,
'source': s_train},
'test-v1': tmp1,
'test-v2': tmp2,
'test-v3': tmp3}
return data
else:
raise NotImplementedError
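# A minimal usage sketch of load_data (it downloads and extracts HASYv2 into
# ~/.datasets on the first call):
#     fold = load_data(mode='fold-1')
#     x_train, y_train = fold['x_train'], fold['y_train']
#     everything = load_data(mode='complete')  # {'x': ..., 'y': ...}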
def load_images(csv_filepath, symbol_id2index,
one_hot=True,
flatten=False,
normalize=True,
shuffle=True):
"""
Load the images into a 4D uint8 numpy array [index, y, x, depth].
Parameters
----------
csv_filepath : str
'test.csv' or 'train.csv'
symbol_id2index : dict
Dictionary generated by generate_index
one_hot : bool, optional (default: True)
Make label vector as 1-hot encoding, otherwise index
flatten : bool, optional (default: False)
Flatten feature vector
normalize : bool, optional (default: True)
        Normalize features to [0.0, 1.0]
shuffle : bool, optional (default: True)
Shuffle loaded data
Returns
-------
images, labels, source :
Images is a 4D uint8 numpy array [index, y, x, depth]
and labels is a 2D uint8 numpy array [index][1-hot enc]
and source is a list of file paths
"""
WIDTH, HEIGHT = 32, 32
dataset_path = os.path.dirname(csv_filepath)
data = _load_csv(csv_filepath)
if flatten:
images = np.zeros((len(data), WIDTH * HEIGHT))
else:
images = np.zeros((len(data), WIDTH, HEIGHT, 1))
labels, sources = [], []
for i, data_item in enumerate(data):
fname = os.path.join(dataset_path, data_item['path'])
sources.append(fname)
if flatten:
img = scipy.ndimage.imread(fname, flatten=False, mode='L')
images[i, :] = img.flatten()
else:
images[i, :, :, 0] = scipy.ndimage.imread(fname,
flatten=False,
mode='L')
label = symbol_id2index[data_item['symbol_id']]
labels.append(label)
# Make sure the type of images is float32
images = np.array(images, dtype=np.float32)
if normalize:
images /= 255.0
data = [images, np.array(labels), sources]
if shuffle:
perm = np.arange(len(labels))
np.random.shuffle(perm)
data[0] = data[0][perm]
data[1] = data[1][perm]
data[2] = [data[2][index] for index in perm]
if one_hot:
data = (data[0], np.eye(len(symbol_id2index))[data[1]], data[2])
return data
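# A small usage sketch (paths are hypothetical; the index must be built with
# generate_index first so the labels line up):
#     s2i, latex_labels = generate_index('HASYv2/classification-task/fold-1/train.csv')
#     x, y, sources = load_images('HASYv2/classification-task/fold-1/train.csv',
#                                 s2i, one_hot=False, flatten=True)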
def _load_images_verification_test(csv_filepath, x_compl, path2index):
"""
Load images from the verification test files.
Parameters
----------
csv_filepath : str
Path to 'test-v1.csv' or 'test-v2.csv' or 'test-v3.csv'
x_compl : numpy array
Complete hasy data
path2index : dict
Map paths to indices of x_compl
Returns
-------
list
[x1s, x2s, labels, sources] where all four are lists of equal length
x1s and x2s contain images,
labels contains either True or False
sources contains strings
"""
test1_csv = _load_csv(csv_filepath)
test1_x1_ids = np.array([path2index[row['path1']]
for row in test1_csv])
test1_x2_ids = np.array([path2index[row['path2']]
for row in test1_csv])
test1_ys = np.array([row['is_same'] == 'True' for row in test1_csv],
dtype=np.float64)
test1_sources = [(row['path1'], row['path2']) for row in test1_csv]
return {'X1s': x_compl[test1_x1_ids],
'X2s': x_compl[test1_x2_ids],
'ys': test1_ys,
'sources': test1_sources}
def _maybe_download(expected_files, work_directory='HASYv2'):
"""
Download the data, unless it is already there.
Parameters
----------
expected_files : list
Each list contains a dict with keys 'filename', 'source', 'md5sum',
where 'filename' denotes the local filename within work_directory,
'source' is an URL where the file can be downloaded and
'md5sum' is the expected MD5 sum of the file
work_directory : str
"""
if not os.path.exists(work_directory):
os.mkdir(work_directory)
for entry in expected_files:
filepath = os.path.join(work_directory, entry['filename'])
logging.info("Search '%s'", filepath)
if not os.path.exists(filepath):
filepath, _ = urllib.request.urlretrieve(entry['source'], filepath)
statinfo = os.stat(filepath)
logging.info('Successfully downloaded %s (%i bytes)'
% (entry['filename'], statinfo.st_size))
with open(filepath, 'rb') as f:
md5sum_actual = hashlib.md5(f.read()).hexdigest()
if md5sum_actual != entry['md5sum']:
logging.error("File '%s' was expected to have md5sum %s, but "
"has '%s'",
entry['filename'],
entry['md5sum'],
md5sum_actual)
else:
with open(filepath, 'rb') as f:
md5sum_actual = hashlib.md5(f.read()).hexdigest()
if md5sum_actual != entry['md5sum']:
logging.error("File '%s' was expected to have md5sum %s, but "
"has '%s'",
entry['filename'],
entry['md5sum'],
md5sum_actual)
def _maybe_extract(tarfile_path, work_directory):
import tarfile
hasy_tools_path = os.path.join(work_directory, "hasy_tools.py")
if not os.path.isfile(hasy_tools_path):
with tarfile.open(tarfile_path, "r:bz2") as tar:
tar.extractall(path=work_directory)
def _get_data(dataset_path):
"""
Download data and extract it, if it is not already in dataset_path.
Parameters
----------
dataset_path : str
"""
filelist = [{'filename': 'HASYv2.tar.bz2',
'source': ('https://zenodo.org/record/259444/files/'
'HASYv2.tar.bz2'),
'md5sum': 'fddf23f36e24b5236f6b3a0880c778e3'}]
_maybe_download(filelist, work_directory=dataset_path)
tar_filepath = os.path.join(dataset_path, filelist[0]['filename'])
_maybe_extract(tar_filepath, dataset_path)
def _is_valid_png(filepath):
"""
Check if the PNG image is valid.
Parameters
----------
filepath : str
Path to a PNG image
Returns
-------
bool : True if the PNG image is valid, otherwise False.
"""
try:
test = Image.open(filepath)
test.close()
return True
    except Exception:
return False
def _verify_all(csv_data_path):
"""Verify all PNG files in the training and test directories."""
train_data = _load_csv(csv_data_path)
for data_item in train_data:
if not _is_valid_png(data_item['path']):
logging.info("%s is invalid." % data_item['path'])
logging.info("Checked %i items of %s." %
(len(train_data), csv_data_path))
def create_random_overview(img_src, x_images, y_images):
"""Create a random overview of images."""
# Create canvas
background = Image.new('RGB',
(35 * x_images, 35 * y_images),
(255, 255, 255))
bg_w, bg_h = background.size
# Paste image on canvas
for x in range(x_images):
for y in range(y_images):
path = random.choice(img_src)['path']
img = Image.open(path, 'r')
img_w, img_h = img.size
offset = (35 * x, 35 * y)
background.paste(img, offset)
# Draw lines
draw = ImageDraw.Draw(background)
for y in range(y_images): # horizontal lines
draw.line((0, 35 * y - 2, 35 * x_images, 35 * y - 2), fill=0)
for x in range(x_images): # vertical lines
draw.line((35 * x - 2, 0, 35 * x - 2, 35 * y_images), fill=0)
# Store
background.save('hasy-overview.png')
def _get_colors(data, verbose=False):
"""
Get how often each color is used in data.
Parameters
----------
data : dict
with key 'path' pointing to an image
verbose : bool, optional
Returns
-------
color_count : dict
Maps a grayscale value (0..255) to how often it was in `data`
"""
color_count = {}
for i in range(256):
color_count[i] = 0
for i, data_item in enumerate(data):
if i % 1000 == 0 and i > 0 and verbose:
print("%i of %i done" % (i, len(data)))
fname = os.path.join('.', data_item['path'])
img = scipy.ndimage.imread(fname, flatten=False, mode='L')
for row in img:
for pixel in row:
color_count[pixel] += 1
return color_count
def data_by_class(data):
"""
Organize `data` by class.
Parameters
----------
data : list of dicts
Each dict contains the key `symbol_id` which is the class label.
Returns
-------
dbc : dict
mapping class labels to lists of dicts
"""
dbc = {}
for item in data:
if item['symbol_id'] in dbc:
dbc[item['symbol_id']].append(item)
else:
dbc[item['symbol_id']] = [item]
return dbc
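# Example (hypothetical rows shaped like the output of _load_csv):
#     rows = [{'symbol_id': '81', 'path': 'a.png'},
#             {'symbol_id': '81', 'path': 'b.png'},
#             {'symbol_id': '99', 'path': 'c.png'}]
#     data_by_class(rows)  # -> {'81': [<a>, <b>], '99': [<c>]}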
def _get_color_statistics(csv_filepath, verbose=False):
"""
Count how often white / black is in the image.
Parameters
----------
csv_filepath : str
'test.csv' or 'train.csv'
verbose : bool, optional
"""
symbolid2latex = _get_symbolid2latex()
data = _load_csv(csv_filepath)
black_level, classes = [], []
for symbol_id, elements in data_by_class(data).items():
colors = _get_colors(elements)
b = colors[0]
w = colors[255]
black_level.append(float(b) / (b + w))
classes.append(symbol_id)
if verbose:
print("{}:\t{:0.4f}".format(symbol_id, black_level[-1]))
print("Average black level: {:0.2f}%"
.format(np.average(black_level) * 100))
print("Median black level: {:0.2f}%"
.format(np.median(black_level) * 100))
print("Minimum black level: {:0.2f}% (class: {})"
.format(min(black_level),
[symbolid2latex[c]
for bl, c in zip(black_level, classes)
if bl <= min(black_level)]))
print("Maximum black level: {:0.2f}% (class: {})"
.format(max(black_level),
[symbolid2latex[c]
for bl, c in zip(black_level, classes)
if bl >= max(black_level)]))
def _get_symbolid2latex(csv_filepath='symbols.csv'):
"""Return a dict mapping symbol_ids to LaTeX code."""
symbol_data = _load_csv(csv_filepath)
symbolid2latex = {}
for row in symbol_data:
symbolid2latex[row['symbol_id']] = row['latex']
return symbolid2latex
def _analyze_class_distribution(csv_filepath,
max_data,
bin_size):
"""Plot the distribution of training data over graphs."""
symbol_id2index, labels = generate_index(csv_filepath)
index2symbol_id = {}
for index, symbol_id in symbol_id2index.items():
index2symbol_id[symbol_id] = index
data, y, s = load_images(csv_filepath, symbol_id2index, one_hot=False)
data = {}
for el in y:
if el in data:
data[el] += 1
else:
data[el] = 1
classes = data
images = len(y)
# Create plot
print("Classes: %i" % len(classes))
print("Images: %i" % images)
class_counts = sorted([count for _, count in classes.items()])
print("\tmin: %i" % min(class_counts))
fig = plt.figure()
ax1 = fig.add_subplot(111)
# plt.title('HASY training data distribution')
plt.xlabel('Amount of available testing images')
plt.ylabel('Number of classes')
# Where we want the ticks, in pixel locations
ticks = [int(el) for el in list(np.linspace(0, max_data, 21))]
# What those pixel locations correspond to in data coordinates.
# Also set the float format here
ax1.set_xticks(ticks)
labels = ax1.get_xticklabels()
plt.setp(labels, rotation=30)
min_examples = 0
ax1.hist(class_counts, bins=range(min_examples, max_data + 1, bin_size))
# plt.show()
filename = '{}.pdf'.format('data-dist')
plt.savefig(filename)
logging.info(f"Plot has been saved as {filename}")
symbolid2latex = _get_symbolid2latex()
top10 = sorted(classes.items(), key=lambda n: n[1], reverse=True)[:10]
top10_data = 0
for index, count in top10:
print("\t%s:\t%i" % (symbolid2latex[index2symbol_id[index]], count))
top10_data += count
total_data = sum([count for index, count in classes.items()])
print("Top-10 has %i training data (%0.2f%% of total)" %
(top10_data, float(top10_data) * 100.0 / total_data))
print("%i classes have more than %i data items." %
(sum([1 for _, count in classes.items() if count > max_data]),
max_data))
def _analyze_pca(csv_filepath):
"""
Analyze how much data can be compressed.
Parameters
----------
csv_filepath : str
Path relative to dataset_path to a CSV file which points to images
"""
import itertools as it
from sklearn.decomposition import PCA
symbol_id2index, labels = generate_index(csv_filepath)
data, y, s = load_images(csv_filepath, symbol_id2index, one_hot=False)
data = data.reshape(data.shape[0], data.shape[1] * data.shape[2])
pca = PCA()
pca.fit(data)
sum_ = 0.0
done_values = [None, None, None]
done_points = [False, False, False]
chck_points = [0.9, 0.95, 0.99]
for counter, el in enumerate(pca.explained_variance_ratio_):
sum_ += el
for check_point, done, i in zip(chck_points, done_points, it.count()):
if not done and sum_ >= check_point:
done_points[i] = counter
done_values[i] = sum_
for components, variance in zip(done_points, done_values):
print("%i components explain %0.2f of the variance" %
(components, variance))
def _get_euclidean_dist(e1, e2):
"""Calculate the euclidean distance between e1 and e2."""
e1 = e1.flatten()
e2 = e2.flatten()
return sum([(el1 - el2)**2 for el1, el2 in zip(e1, e2)])**0.5
def _inner_class_distance(data):
"""Measure the eucliden distances of one class to the mean image."""
distances = []
mean_img = None
for e1 in data:
fname1 = os.path.join('.', e1['path'])
img1 = scipy.ndimage.imread(fname1, flatten=False, mode='L')
if mean_img is None:
            mean_img = img1.astype(np.float64)  # accumulate as a float array so += and the mean below work
else:
mean_img += img1
mean_img = mean_img / float(len(data))
# mean_img = thresholdize(mean_img, 'auto')
scipy.misc.imshow(mean_img)
for e1 in data:
fname1 = os.path.join('.', e1['path'])
img1 = scipy.ndimage.imread(fname1, flatten=False, mode='L')
dist = _get_euclidean_dist(img1, mean_img)
distances.append(dist)
return (distances, mean_img)
def thresholdize(img, threshold=0.5):
"""Create a black-and-white image from a grayscale image."""
img_new = []
if threshold == 'auto':
img_flat = sorted(img.flatten())
threshold_ind = int(0.85 * len(img_flat))
threshold = img_flat[threshold_ind]
for row in img:
bla = []
for col in row:
if col > threshold:
bla.append(1)
else:
bla.append(0)
img_new.append(bla)
return np.array(img_new)
def _analyze_distances(csv_filepath):
"""Analyze the distance between elements of one class and class means."""
symbolid2latex = _get_symbolid2latex()
data = _load_csv(csv_filepath)
data = data_by_class(data)
mean_imgs = []
for class_, data_class in data.items():
latex = symbolid2latex[class_]
d, mean_img = _inner_class_distance(data_class)
# scipy.misc.imshow(mean_img)
print("%s: min=%0.4f, avg=%0.4f, median=%0.4f max=%0.4f" %
(latex, np.min(d), np.average(d), np.median(d), np.max(d)))
distarr = sorted([(label, mean_c, _get_euclidean_dist(mean_c,
mean_img))
for label, mean_c in mean_imgs],
key=lambda n: n[2])
for label, mean_c, d in distarr:
print(f"\t{label}: {d:0.4f}")
mean_imgs.append((latex, mean_img))
def _analyze_variance(csv_filepath):
"""Calculate the variance of each pixel."""
symbol_id2index, labels = generate_index(csv_filepath)
data, y, s = load_images(csv_filepath, symbol_id2index, one_hot=False)
# Calculate mean
sum_ = np.zeros((32, 32))
for el in data:
el = np.squeeze(el)
sum_ += el
mean_ = sum_ / float(len(data))
scipy.misc.imshow(mean_)
# Calculate variance
centered_ = np.zeros((32, 32))
for el in data:
el = np.squeeze(el)
centered_ += (el - mean_)**2
    centered_ = (centered_ / float(len(data)))**0.5  # per-pixel standard deviation
scipy.misc.imshow(centered_)
for row in list(centered_):
row = list(row)
print(" ".join(["%0.1f" % nr for nr in row]))
def _analyze_correlation(csv_filepath):
"""
Analyze and visualize the correlation of features.
Parameters
----------
csv_filepath : str
Path to a CSV file which points to images
"""
import pandas as pd
from matplotlib import cm as cm
from matplotlib import pyplot as plt
symbol_id2index, labels = generate_index(csv_filepath)
data, y, s = load_images(csv_filepath,
symbol_id2index,
one_hot=False,
flatten=True)
df = pd.DataFrame(data=data)
logging.info("Data loaded. Start correlation calculation. Takes 1.5h.")
fig = plt.figure()
ax1 = fig.add_subplot(111)
# Where we want the ticks, in pixel locations
ticks = np.linspace(0, 1024, 17)
# What those pixel locations correspond to in data coordinates.
# Also set the float format here
ax1.set_xticks(ticks)
ax1.set_yticks(ticks)
labels = ax1.get_xticklabels()
plt.setp(labels, rotation=30)
cmap = cm.get_cmap('viridis', 30)
cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
ax1.grid(True)
# Add colorbar, make sure to specify tick locations to match desired
# ticklabels
fig.colorbar(cax, ticks=[-0.15, 0, 0.15, 0.30, 0.45, 0.60, 0.75, 0.90, 1])
filename = '{}.pdf'.format('feature-correlation')
plt.savefig(filename)
def _create_stratified_split(csv_filepath, n_splits):
"""
Create a stratified split for the classification task.
Parameters
----------
csv_filepath : str
Path to a CSV file which points to images
n_splits : int
Number of splits to make
"""
from sklearn.model_selection import StratifiedKFold
data = _load_csv(csv_filepath)
labels = [el['symbol_id'] for el in data]
    skf = StratifiedKFold(n_splits=n_splits)
i = 1
kdirectory = 'classification-task'
if not os.path.exists(kdirectory):
os.makedirs(kdirectory)
    for train_index, test_index in skf.split(np.zeros(len(labels)), labels):
print("Create fold %i" % i)
directory = "%s/fold-%i" % (kdirectory, i)
if not os.path.exists(directory):
os.makedirs(directory)
else:
print("Directory '%s' already exists. Please remove it." %
directory)
i += 1
train = [data[el] for el in train_index]
test_ = [data[el] for el in test_index]
for dataset, name in [(train, 'train'), (test_, 'test')]:
with open(f"{directory}/{name}.csv", 'wb') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(('path', 'symbol_id', 'latex', 'user_id'))
for el in dataset:
csv_writer.writerow(("../../%s" % el['path'],
el['symbol_id'],
el['latex'],
el['user_id']))
def _create_pair(r1_data, r2_data):
"""Create a pair for the verification test."""
    symbol_index = random.choice(list(r1_data.keys()))
r1 = random.choice(r1_data[symbol_index])
is_same = random.choice([True, False])
if is_same:
symbol_index2 = symbol_index
r2 = random.choice(r1_data[symbol_index2])
else:
        symbol_index2 = random.choice(list(r2_data.keys()))
        while symbol_index2 == symbol_index:
            symbol_index2 = random.choice(list(r2_data.keys()))
r2 = random.choice(r2_data[symbol_index2])
return (r1['path'], r2['path'], is_same)
def _create_verification_task(sample_size=32, test_size=0.05):
"""
Create the datasets for the verification task.
Parameters
----------
sample_size : int
Number of classes which will be taken completely
test_size : float in (0, 1)
Percentage of the remaining data to be taken to test
"""
# Get the data
data = _load_csv('hasy-data-labels.csv')
for el in data:
el['path'] = "../hasy-data/" + el['path'].split("hasy-data/")[1]
data = sorted(data_by_class(data).items(),
key=lambda n: len(n[1]),
reverse=True)
symbolid2latex = _get_symbolid2latex()
# Get complete classes
symbols = random.sample(range(len(data)), k=sample_size)
symbols = sorted(symbols, reverse=True)
test_data_excluded = []
for symbol_index in symbols:
# for class_label, items in data:
class_label, items = data.pop(symbol_index)
test_data_excluded += items
print(symbolid2latex[class_label])
# Get data from remaining classes
data_n = []
for class_label, items in data:
data_n = data_n + items
ys = [el['symbol_id'] for el in data_n]
x_train, x_test, y_train, y_test = train_test_split(data_n,
ys,
test_size=test_size)
# Write the training / test data
print("Test data (excluded symbols) = %i" % len(test_data_excluded))
print("Test data (included symbols) = %i" % len(x_test))
print("Test data (total) = %i" % (len(x_test) + len(test_data_excluded)))
kdirectory = 'verification-task'
if not os.path.exists(kdirectory):
os.makedirs(kdirectory)
with open("%s/train.csv" % kdirectory, 'wb') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(('path', 'symbol_id', 'latex', 'user_id'))
for el in x_train:
csv_writer.writerow((el['path'],
el['symbol_id'],
el['latex'],
el['user_id']))
x_test_inc_class = data_by_class(x_test)
x_text_exc_class = data_by_class(test_data_excluded)
# V1: Both symbols belong to the training set (included symbols)
with open("%s/test-v1.csv" % kdirectory, 'wb') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(('path1', 'path2', 'is_same'))
for i in range(100000):
test_data_tuple = _create_pair(x_test_inc_class, x_test_inc_class)
csv_writer.writerow(test_data_tuple)
# V2: r1 belongs to a symbol in the training set, but r2 might not
with open("%s/test-v2.csv" % kdirectory, 'wb') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(('path1', 'path2', 'is_same'))
for i in range(100000):
test_data_tuple = _create_pair(x_test_inc_class, x_text_exc_class)
csv_writer.writerow(test_data_tuple)
# V3: r1 and r2 both don't belong to symbols in the training set
with open("%s/test-v3.csv" % kdirectory, 'wb') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(('path1', 'path2', 'is_same'))
for i in range(100000):
test_data_tuple = _create_pair(x_text_exc_class, x_text_exc_class)
csv_writer.writerow(test_data_tuple)
def _count_users(csv_filepath):
"""
Count the number of users who contributed to the dataset.
Parameters
----------
csv_filepath : str
Path to a CSV file which points to images
"""
data = _load_csv(csv_filepath)
user_ids = {}
for el in data:
if el['user_id'] not in user_ids:
user_ids[el['user_id']] = [el['path']]
else:
user_ids[el['user_id']].append(el['path'])
max_els = 0
max_user = 0
for user_id, elements in user_ids.items():
if len(elements) > max_els:
max_els = len(elements)
max_user = user_id
print("Dataset has %i users." % len(user_ids))
print("User %s created most (%i elements, %0.2f%%)" %
(max_user, max_els, float(max_els) / len(data) * 100.0))
def _analyze_cm(cm_file, total_symbols=100):
"""
Analyze a confusion matrix.
Parameters
----------
cm_file : str
Path to a confusion matrix in JSON format.
Each line contains a list of non-negative integers.
cm[i][j] indicates how often members of class i were labeled with j
"""
symbolid2latex = _get_symbolid2latex()
symbol_id2index, labels = generate_index('hasy-data-labels.csv')
index2symbol_id = {}
for index, symbol_id in symbol_id2index.items():
index2symbol_id[symbol_id] = index
# Load CM
with open(cm_file) as data_file:
cm = json.load(data_file)
class_accuracy = []
n = len(cm)
test_samples_sum = np.sum(cm)
# Number of recordings for symbols which don't have a single correct
# prediction
sum_difficult_none = 0
# Number of recordings for symbols which have an accuracy of less than 5%
sum_difficult_five = 0
for i in range(n):
total = sum([cm[i][j] for j in range(n)])
class_accuracy.append({'class_index': i,
'class_accuracy': float(cm[i][i]) / total,
'class_confusion_index': np.argmax(cm[i]),
'correct_total': cm[i][i],
'class_total': total})
print("Lowest class accuracies:")
class_accuracy = sorted(class_accuracy, key=lambda n: n['class_accuracy'])
index2latex = lambda n: symbolid2latex[index2symbol_id[n]]
for i in range(total_symbols):
if class_accuracy[i]['correct_total'] == 0:
sum_difficult_none += class_accuracy[i]['class_total']
if class_accuracy[i]['class_accuracy'] < 0.05:
sum_difficult_five += class_accuracy[i]['class_total']
latex_orig = index2latex(class_accuracy[i]['class_index'])
latex_conf = index2latex(class_accuracy[i]['class_confusion_index'])
# print("\t%i. \t%s:\t%0.4f (%s); correct=%i" %
# (i + 1,
# latex_orig,
# class_accuracy[i]['class_accuracy'],
# latex_conf,
# class_accuracy[i]['correct_total']))
print(("\t\\verb+{:<15}+ & ${:<15}$ & {:<15} & \\verb+{:<15}+ "
"& ${:<15}$ \\\\ ({})").format
(latex_orig, latex_orig,
class_accuracy[i]['class_total'],
latex_conf, latex_conf,
class_accuracy[i]['correct_total']))
print("Non-correct: %0.4f%%" %
(sum_difficult_none / float(test_samples_sum)))
print("five-correct: %0.4f%%" %
(sum_difficult_five / float(test_samples_sum)))
print("Easy classes")
class_accuracy = sorted(class_accuracy,
key=lambda n: n['class_accuracy'],
reverse=True)
for i in range(total_symbols):
latex_orig = index2latex(class_accuracy[i]['class_index'])
latex_conf = index2latex(class_accuracy[i]['class_confusion_index'])
if class_accuracy[i]['class_accuracy'] < 0.99:
break
# print("\t%i. \t%s:\t%0.4f (%s); correct=%i" %
# (i + 1,
# latex_orig,
# class_accuracy[i]['class_accuracy'],
# latex_conf,
# class_accuracy[i]['correct_total']))
print(("\t\\verb+{:<15}+ & ${:<15}$ & {:<15} & "
"\\verb+{:<15}+ & ${:<15}$ \\\\ ({})").format
(latex_orig, latex_orig,
class_accuracy[i]['class_total'],
latex_conf, latex_conf,
class_accuracy[i]['correct_total']))
# cm = np.array(cm)
# scipy.misc.imshow(cm)
def preprocess(x):
"""Preprocess features."""
x = x.astype('float32')
x /= 255.0
return x
def _get_parser():
"""Get parser object for hasy_tools.py."""
import argparse
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--dataset",
dest="dataset",
help="specify which data to use")
parser.add_argument("--verify",
dest="verify",
action="store_true",
default=False,
help="verify PNG files")
parser.add_argument("--overview",
dest="overview",
action="store_true",
default=False,
help="Get overview of data")
parser.add_argument("--analyze_color",
dest="analyze_color",
action="store_true",
default=False,
help="Analyze the color distribution")
parser.add_argument("--class_distribution",
dest="class_distribution",
action="store_true",
default=False,
help="Analyze the class distribution")
parser.add_argument("--distances",
dest="distances",
action="store_true",
default=False,
help="Analyze the euclidean distance distribution")
parser.add_argument("--pca",
dest="pca",
action="store_true",
default=False,
help=("Show how many principal components explain "
"90%% / 95%% / 99%% of the variance"))
parser.add_argument("--variance",
dest="variance",
action="store_true",
default=False,
help="Analyze the variance of features")
parser.add_argument("--correlation",
dest="correlation",
action="store_true",
default=False,
help="Analyze the correlation of features")
parser.add_argument("--create-classification-task",
dest="create_folds",
action="store_true",
default=False,
help=argparse.SUPPRESS)
parser.add_argument("--create-verification-task",
dest="create_verification_task",
action="store_true",
default=False,
help=argparse.SUPPRESS)
parser.add_argument("--count-users",
dest="count_users",
action="store_true",
default=False,
help="Count how many different users have created "
"the dataset")
parser.add_argument("--analyze-cm",
dest="cm",
default=False,
help="Analyze a confusion matrix in JSON format.")
return parser
if __name__ == "__main__":
args = _get_parser().parse_args()
if args.verify:
if args.dataset is None:
logging.error("--dataset needs to be set for --verify")
sys.exit()
_verify_all(args.dataset)
if args.overview:
img_src = _load_csv(args.dataset)
create_random_overview(img_src, x_images=10, y_images=10)
if args.analyze_color:
_get_color_statistics(csv_filepath=args.dataset)
if args.class_distribution:
_analyze_class_distribution(csv_filepath=args.dataset,
max_data=1000,
bin_size=25)
if args.pca:
_analyze_pca(csv_filepath=args.dataset)
if args.distances:
_analyze_distances(csv_filepath=args.dataset)
if args.variance:
_analyze_variance(csv_filepath=args.dataset)
if args.correlation:
_analyze_correlation(csv_filepath=args.dataset)
if args.create_folds:
_create_stratified_split(args.dataset, int(args.create_folds))
if args.count_users:
_count_users(csv_filepath=args.dataset)
if args.create_verification_task:
_create_verification_task()
if args.cm:
_analyze_cm(args.cm)
| mit |
jswoboda/SimISR | SimISR/analysisplots.py | 2 | 36407 | #!/usr/bin/env python
"""
Created on Wed May 6 13:55:26 2015
analysisplots.py
This module is used to plot the output from various stages of the simulator to debug
problems. This is also helpful for presentations.
@author: John Swoboda
"""
from . import Path
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import scipy as sp
import scipy.fftpack as scfft
import numpy as np
import seaborn as sns
from .IonoContainer import IonoContainer
from .utilFunctions import readconfigfile,spect2acf,acf2spect
from .specfunctions import ISRspecmakeout#,ISRSfitfunction
def beamvstime(configfile,maindir,params=['Ne'],filetemplate='AltvTime',suptitle = 'Alt vs Time'):
""" This will create a altitude time image for the data for ionocontainer files
that are in sphereical coordinates.
Inputs
Times - A list of times that will be plotted.
configfile - The INI file with the simulation parameters that will be useds.
maindir - The directory the images will be saved in.
params - List of Parameter names that will be ploted. These need to match
in the ionocontainer names.
filetemplate - The first part of a the file names.
suptitle - The supertitle for the plots.
"""
sns.set_style("whitegrid")
sns.set_context("notebook")
# rc('text', usetex=True)
(sensdict,simparams) = readconfigfile(configfile)
paramslower = [ip.lower() for ip in params]
Np = len(params)
maindir=Path(maindir)
inputfile = str(maindir.joinpath('Fitted','fitteddata.h5'))
Ionofit = IonoContainer.readh5(inputfile)
times = Ionofit.Time_Vector
Nt = len(times)
dataloc = Ionofit.Sphere_Coords
pnames = Ionofit.Param_Names
pnameslower = sp.array([ip.lower() for ip in pnames.flatten()])
p2fit = [sp.argwhere(ip==pnameslower)[0][0] if ip in pnameslower else None for ip in paramslower]
angles = dataloc[:,1:]
b = np.ascontiguousarray(angles).view(np.dtype((np.void, angles.dtype.itemsize * angles.shape[1])))
_, idx, invidx = np.unique(b, return_index=True,return_inverse=True)
beamlist = angles[idx]
Nb = beamlist.shape[0]
newfig=True
imcount=0
ifig=-1
for iparam in range(Np):
for ibeam in range(Nb):
if newfig:
(figmplf, axmat) = plt.subplots(3, 3,figsize=(20, 15), facecolor='w',sharex=True, sharey=True)
axvec = axmat.flatten()
newfig=False
ix=0
ifig+=1
ax=axvec[ix]
curbeam = beamlist[ibeam]
curparm = paramslower[iparam]
if curparm == 'nepow':
curparm = 'ne'
indxkep = np.argwhere(invidx==ibeam)[:,0]
rng_fit= dataloc[indxkep,0]
rngargs = np.argsort(rng_fit)
rng_fit = rng_fit[rngargs]
alt_fit = rng_fit*sp.sin(curbeam[1]*sp.pi/180.)
curfit = Ionofit.Param_List[indxkep,:,p2fit[iparam]]
curfit = curfit[rngargs]
Tmat, Amat =np.meshgrid(times[:,0],alt_fit)
image = ax.pcolor(Tmat,Amat,curfit.real,cmap='viridis')
if curparm=='ne':
image.set_norm(colors.LogNorm(vmin=1e9,vmax=5e12))
cbarstr = params[iparam] + ' m-3'
else:
image.set_norm(colors.PowerNorm(gamma=1.,vmin=500,vmax=3e3))
cbarstr = params[iparam] + ' K'
if ix>5:
ax.set_xlabel("Time in s")
if sp.mod(ix,3)==0:
ax.set_ylabel('Alt km')
ax.set_title('{0} vs Altitude, Az: {1}$^o$ El: {2}$^o$'.format(params[iparam],*curbeam))
imcount=imcount+1
ix+=1
if ix==9 or ibeam+1==Nb:
cbar_ax = figmplf.add_axes([.91, .3, .06, .4])
cbar = plt.colorbar(image,cax=cbar_ax)
cbar.set_label(cbarstr)
figmplf.suptitle(suptitle, fontsize=20)
figmplf.tight_layout(rect=[0, .05, .9, .95])
fname= filetemplate+'_{0:0>3}.png'.format(ifig)
plt.savefig(fname)
plt.close(figmplf)
newfig=True
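# A minimal usage sketch (hypothetical paths; maindir must contain the
# 'Fitted/fitteddata.h5' output of a completed SimISR run):
#     beamvstime('sim.ini', 'testsimdir', params=['Ne', 'Te'],
#                filetemplate='AltvTime', suptitle='Alt vs Time')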
def fitsurfaceplot(paramdict,plotvals,configfile,y_acf,yerr=None,filetemplate='fitsurfs',suptitle = 'Fit Surfaces'):
""" This will create a fit surface plot.
Inputs
        paramdict - A dictionary with the following key value pairs.
            Ne - Array of possible electron density values.
            Te - Array of possible electron temperature values.
            Ti - Array of possible ion temperature values.
            frac - Array of possible fraction shares of the ion make up.
        plotvals - A list of dictionaries, each with the following key value pairs.
            setparam - A string that describes the parameter that's set.
            xparam - The parameter that's varied along the x axis of the image.
            yparam - The parameter that's varied along the y axis of the image.
            indx - The index from the paramdict for the set variable.
        configfile - The file that's used for the simulation.
y_acf - the complex ACF used to create the errors.
yerr - The standard deviation of the acf measurement.
filetemplate - The template on how the file will be named.
suptitle - The super title for the plots.
"""
sns.set_style("whitegrid")
sns.set_context("notebook")
(sensdict,simparams) = readconfigfile(configfile)
specs = simparams['species']
nspecs = len(specs)
# make param lists
paramlist = [[]]*(2*nspecs+1)
paramlist[2*(nspecs-1)] =paramdict['Ne']
paramlist[2*(nspecs-1)+1] =paramdict['Te']
if 'frac' in paramdict.keys():
frac = paramdict['frac']
else:
frac = [[1./(nspecs-1)]]*(nspecs-1)
for ispec in range(nspecs-1):
paramlist[2*ispec] =frac[ispec]
paramlist[2*ispec+1] = paramdict['Ti'][ispec]
if 'Vi' in paramdict.keys():
paramlist[-1] = paramdict['Vi']
else:
paramlist[-1] =[0.]
pvals = {'Ne':2*(nspecs-1),'Te':2*(nspecs-1)+1,'Ti':1,'frac':0}
fitsurfs= makefitsurf(paramlist,y_acf,sensdict,simparams,yerr)
quad = (3,3)
i_fig=0
for iplt, idict in enumerate(plotvals):
iaxn = sp.mod(iplt,sp.prod(quad))
if iaxn==0:
(figmplf, axmat) = plt.subplots(quad[0],quad[1],figsize=(20, 15), facecolor='w')
axvec = axmat.flatten()
setstr = idict['setparam']
xstr = idict['xparam']
ystr = idict['yparam']
mloc = pvals[setstr]
xdim = pvals[xstr]
ydim = pvals[ystr]
        setval = paramlist[mloc][idict['indx']]
transarr = sp.arange(2*nspecs+1).tolist()
transarr.remove(mloc)
transarr.remove(xdim)
transarr.remove(ydim)
transarr = [mloc,ydim,xdim] +transarr
fitupdate = sp.transpose(fitsurfs,transarr)
while fitupdate.ndim>3:
            fitupdate = sp.nanmean(fitupdate, axis=-1)
Z1 = fitupdate[idict['indx']]
iax = axvec[iaxn]
xvec = paramdict[xstr]
yvec = paramdict[ystr]
[Xmat,Ymat]= sp.meshgrid(xvec,yvec)
iax.pcolor(Xmat,Ymat,Z1,norm=colors.LogNorm(vmin=Z1.min(), vmax=Z1.max()))
        iax.set_xlabel(xstr)
        iax.set_ylabel(ystr)
        iax.set_title('{0} at {1}'.format(setstr, setval))
if iaxn ==sp.prod(quad)-1:
figmplf.suptitle(suptitle, fontsize=20)
fname= filetemplate+'_{0:0>4}.png'.format(i_fig)
plt.savefig(fname)
plt.close(figmplf)
i_fig+=1
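# A sketch of the nested inputs fitsurfaceplot expects (values are hypothetical;
# y_acf would be a complex lag-product vector from the simulation):
#     paramdict = {'Ne': np.logspace(10, 12, 20),
#                  'Te': np.linspace(500., 3000., 20),
#                  'Ti': [np.linspace(500., 3000., 20)]}
#     plotvals = [{'setparam': 'Ne', 'xparam': 'Te', 'yparam': 'Ti', 'indx': 0}]
#     fitsurfaceplot(paramdict, plotvals, 'sim.ini', y_acf, yerr=None)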
def maketi(Ionoin):
""" This makes the ion densities, tempretures and velocities and places
them in the Param_List variable in the ionocontainer object.
"""
(Nloc,Nt,Nion,Nppi) = Ionoin.Param_List.shape
Paramlist = Ionoin.Param_List[:,:,:-1,:]
Vi = Ionoin.getDoppler()
Nisum = sp.sum(Paramlist[:,:,:,0],axis=2)
Tisum = sp.sum(Paramlist[:,:,:,0]*Paramlist[:,:,:,1],axis=2)
Tiave = Tisum/Nisum
Newpl = sp.zeros((Nloc,Nt,Nion+2,Nppi))
Newpl[:,:,:-2,:] = Ionoin.Param_List
Newpl[:,:,-2,0] = Nisum
Newpl[:,:,-2,1] = Tiave
Newpl[:,:,-1,0] = Vi
newrow = sp.array([['Ni','Ti'],['Vi','xx']])
newpn = sp.vstack((Ionoin.Param_Names,newrow))
Ionoin.Param_List = Newpl
Ionoin.Param_Names = newpn
return Ionoin
def plotbeamparametersv2(times, configfile, maindir, fitdir='Fitted', params=['Ne'],
filetemplate='params', suptitle='Parameter Comparison',
werrors=False, nelog=True):
"""
This function will plot the desired parameters for each beam along range.
The values of the input and measured parameters will be plotted
Inputs
Times - A list of times that will be plotted.
        configfile - The INI file with the simulation parameters that will be used.
        maindir - The directory the images will be saved in.
        params - List of parameter names that will be plotted. These need to match
            in the ionocontainer names.
        filetemplate - The first part of the file names.
        suptitle - The supertitle for the plots.
        werrors - A bool that determines if the errors will be plotted.
"""
sns.set_style("whitegrid")
sns.set_context("notebook")
# rc('text', usetex=True)
maindir = Path(maindir)
ffit = maindir/fitdir/'fitteddata.h5'
inputfiledir = maindir/'Origparams'
(sensdict, simparams) = readconfigfile(configfile)
paramslower = [ip.lower() for ip in params]
Nt = len(times)
Np = len(params)
#Read in fitted data
Ionofit = IonoContainer.readh5(str(ffit))
dataloc = Ionofit.Sphere_Coords
pnames = Ionofit.Param_Names
pnameslower = sp.array([ip.lower() for ip in pnames.flatten()])
p2fit = [sp.argwhere(ip == pnameslower)[0][0]
if ip in pnameslower else None for ip in paramslower]
time2fit = [None]*Nt
# Have to fix this because of time offsets
if times[0] == 0:
times += Ionofit.Time_Vector[0, 0]
for itn, itime in enumerate(times):
filear = sp.argwhere(Ionofit.Time_Vector[:, 0] >= itime)
if len(filear) == 0:
filenum = len(Ionofit.Time_Vector)-1
else:
filenum = sp.argmin(sp.absolute(Ionofit.Time_Vector[:, 0]-itime))
time2fit[itn] = filenum
times_int = [Ionofit.Time_Vector[i] for i in time2fit]
# determine the beams
angles = dataloc[:, 1:]
rng = sp.unique(dataloc[:, 0])
b_arr = np.ascontiguousarray(angles).view(np.dtype((np.void,
angles.dtype.itemsize * angles.shape[1])))
_, idx, invidx = np.unique(b_arr, return_index=True, return_inverse=True)
beamlist = angles[idx]
Nb = beamlist.shape[0]
# Determine which imput files are to be used.
dirlist = sorted(inputfiledir.glob('*.h5'))
dirliststr = [str(i) for i in dirlist]
sortlist, outime, outfilelist,timebeg,timelist_s = IonoContainer.gettimes(dirliststr)
timelist = timebeg.copy()
time2file = [None]*Nt
time2intime = [None]*Nt
# go through times find files and then times in files
for itn, itime in enumerate(times):
filear = sp.argwhere(timelist >= itime)
if len(filear) == 0:
filenum = [len(timelist)-1]
else:
filenum = filear[0]
flist1 = []
timeinflist = []
for ifile in filenum:
filetimes = timelist_s[ifile]
log1 = (filetimes[:, 0] >= times_int[itn][0]) & (filetimes[:, 0] < times_int[itn][1])
log2 = (filetimes[:, 1] > times_int[itn][0]) & (filetimes[:, 1] <= times_int[itn][1])
log3 = (filetimes[:, 0] <= times_int[itn][0]) & (filetimes[:, 1] > times_int[itn][1])
log4 = (filetimes[:, 0] > times_int[itn][0]) & (filetimes[:, 1] < times_int[itn][1])
curtimes1 = sp.where(log1|log2|log3|log4)[0].tolist()
flist1 = flist1+ [ifile]*len(curtimes1)
timeinflist = timeinflist+curtimes1
time2intime[itn] = timeinflist
time2file[itn] = flist1
nfig = int(sp.ceil(Nt*Nb))
imcount = 0
curfilenum = -1
# Loop for the figures
for i_fig in range(nfig):
lines = [None]*2
labels = [None]*2
(figmplf, axmat) = plt.subplots(int(sp.ceil(Np/2)), 2, figsize=(20, 15), facecolor='w')
axvec = axmat.flatten()
# loop that goes through each axis loops through each parameter, beam
# then time.
for ax in axvec:
if imcount >= Nt*Nb*Np:
break
imcount_f = float(imcount)
itime = int(sp.floor(imcount_f/Nb/Np))
iparam = int(imcount_f/Nb-Np*itime)
ibeam = int(imcount_f-(itime*Np*Nb+iparam*Nb))
curbeam = beamlist[ibeam]
altlist = sp.sin(curbeam[1]*sp.pi/180.)*rng
curparm = paramslower[iparam]
# Use Ne from input to compare the ne derived from the power.
if curparm == 'nepow':
curparm_in = 'ne'
else:
curparm_in = curparm
curcoord = sp.zeros(3)
curcoord[1:] = curbeam
for iplot, filenum in enumerate(time2file[itime]):
if curfilenum != filenum:
curfilenum = filenum
datafilename = dirlist[filenum]
Ionoin = IonoContainer.readh5(str(datafilename))
if ('ti' in paramslower) or ('vi' in paramslower):
Ionoin = maketi(Ionoin)
pnames = Ionoin.Param_Names
pnameslowerin = sp.array([ip.lower() for ip in pnames.flatten()])
prmloc = sp.argwhere(curparm_in == pnameslowerin)
if prmloc.size != 0:
curprm = prmloc[0][0]
                # build up the parameter vector vs the range values by finding the closest point in space in the input
curdata = sp.zeros(len(rng))
for irngn, irng in enumerate(rng):
curcoord[0] = irng
tempin = Ionoin.getclosestsphere(curcoord)[0][time2intime[itime]]
Ntloc = tempin.shape[0]
tempin = sp.reshape(tempin, (Ntloc, len(pnameslowerin)))
curdata[irngn] = tempin[0, curprm]
#actual plotting of the input data
lines[0] = ax.plot(curdata, altlist, marker='o', c='b', linewidth=2)[0]
labels[0] = 'Input Parameters'
# Plot fitted data for the axis
indxkep = np.argwhere(invidx == ibeam)[:, 0]
curfit = Ionofit.Param_List[indxkep, time2fit[itime], p2fit[iparam]]
rng_fit = dataloc[indxkep, 0]
alt_fit = rng_fit*sp.sin(curbeam[1]*sp.pi/180.)
errorexist = 'n'+paramslower[iparam] in pnameslower
if errorexist and werrors:
eparam = sp.argwhere('n'+paramslower[iparam] == pnameslower)[0][0]
curerror = Ionofit.Param_List[indxkep, time2fit[itime], eparam]
lines[1] = ax.errorbar(curfit, alt_fit, xerr=curerror, fmt='-.',
c='g', linewidth=2)[0]
else:
lines[1] = ax.plot(curfit, alt_fit, marker='o', c='g', linewidth=2)[0]
labels[1] = 'Fitted Parameters'
# get and plot the input data
numplots = len(time2file[itime])
# set the limit for the parameter
if curparm == 'vi':
ax.set(xlim=[-1.25*sp.nanmax(sp.absolute(curfit)), 1.25*sp.nanmax(sp.absolute(curfit))])
elif curparm_in != 'ne':
ax.set(xlim=[0.75*sp.nanmin(curfit), sp.minimum(1.25*sp.nanmax(curfit), 8000.)])
elif (curparm_in == 'ne') and nelog:
ax.set_xscale('log')
ax.set_xlabel(params[iparam])
ax.set_ylabel('Alt km')
ax.set_title('{0} vs Altitude, Time: {1}s Az: {2}$^o$ El: {3}$^o$'.format(params[iparam], times[itime], *curbeam))
imcount += 1
# save figure
figmplf.suptitle(suptitle, fontsize=20)
if None in labels:
labels.remove(None)
lines.remove(None)
plt.figlegend(lines, labels, loc = 'lower center', ncol=5, labelspacing=0.)
fname = filetemplate+'_{0:0>3}.png'.format(i_fig)
plt.savefig(fname)
plt.close(figmplf)
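# A minimal usage sketch (hypothetical paths; maindir must hold 'Origparams'
# and 'Fitted/fitteddata.h5' from a finished simulation):
#     plotbeamparametersv2(np.array([60.]), 'sim.ini', 'testsimdir',
#                          params=['Ne', 'Te', 'Ti'], werrors=True)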
def plotspecs(coords, times, configfile, maindir, cartcoordsys=True, indisp=True, acfdisp=True,
fitdisp=True, filetemplate='spec', suptitle='Spectrum Comparison'):
""" This will create a set of images that compare the input ISR spectrum to the
output ISR spectrum from the simulator.
Inputs
coords - An Nx3 numpy array that holds the coordinates of the desired points.
times - A numpy list of times in seconds.
configfile - The name of the configuration file used.
        cartcoordsys - (default True) A bool; if true then the coordinates are given in cartesian, if
            false then it is assumed that the coords are given in spherical coordinates.
specsfilename - (default None) The name of the file holding the input spectrum.
acfname - (default None) The name of the file holding the estimated ACFs.
filetemplate (default 'spec') This is the beginning string used to save the images.
"""
sns.set_style("whitegrid")
sns.set_context("notebook")
maindir=Path(maindir).expanduser()
acfname = maindir.joinpath('ACF','00lags.h5')
ffit = maindir.joinpath('Fitted','fitteddata.h5')
specsfiledir = maindir.joinpath('Spectrums')
(sensdict,simparams) = readconfigfile(configfile)
simdtype = simparams['dtype']
    npts = int(simparams['numpoints']*3)  # number of spectral points used for the FFTs
amb_dict = simparams['amb_dict']
if sp.ndim(coords)==1:
coords = coords[sp.newaxis,:]
Nt = len(times)
Nloc = coords.shape[0]
sns.set_style("whitegrid")
sns.set_context("notebook")
if indisp:
dirlist = [i.name for i in specsfiledir.glob('*.h5')]
timelist = sp.array([float(i.split()[0]) for i in dirlist])
for itn,itime in enumerate(times):
filear = sp.argwhere(timelist>=itime)
if len(filear)==0:
filenum = len(timelist)-1
else:
filenum = filear[0][0]
specsfilename = specsfiledir.joinpath(dirlist[filenum])
Ionoin = IonoContainer.readh5(str(specsfilename))
if itn==0:
specin = sp.zeros((Nloc,Nt,Ionoin.Param_List.shape[-1])).astype(Ionoin.Param_List.dtype)
omeg = Ionoin.Param_Names
npts = Ionoin.Param_List.shape[-1]
for icn, ic in enumerate(coords):
if cartcoordsys:
tempin = Ionoin.getclosest(ic,times)[0]
else:
tempin = Ionoin.getclosestsphere(ic,times)[0]
specin[icn,itn] = tempin[0,:]/npts
fs = sensdict['fs']
if acfdisp:
Ionoacf = IonoContainer.readh5(str(acfname))
ACFin = sp.zeros((Nloc,Nt,Ionoacf.Param_List.shape[-1])).astype(Ionoacf.Param_List.dtype)
ts = sensdict['t_s']
omeg = sp.arange(-sp.ceil((npts-1.)/2.),sp.floor((npts-1.)/2.)+1)/ts/npts
for icn, ic in enumerate(coords):
if cartcoordsys:
tempin = Ionoacf.getclosest(ic,times)[0]
else:
tempin = Ionoacf.getclosestsphere(ic,times)[0]
if sp.ndim(tempin)==1:
tempin = tempin[sp.newaxis,:]
ACFin[icn] = tempin
specout = scfft.fftshift(scfft.fft(ACFin,n=npts,axis=-1),axes=-1)
if fitdisp:
Ionofit = IonoContainer.readh5(str(ffit))
(omegfit,outspecsfit) =ISRspecmakeout(Ionofit.Param_List,sensdict['fc'],sensdict['fs'],simparams['species'],npts)
Ionofit.Param_List= outspecsfit
Ionofit.Param_Names = omegfit
specfit = sp.zeros((Nloc,Nt,npts))
for icn, ic in enumerate(coords):
if cartcoordsys:
tempin = Ionofit.getclosest(ic,times)[0]
else:
tempin = Ionofit.getclosestsphere(ic,times)[0]
if sp.ndim(tempin)==1:
tempin = tempin[sp.newaxis,:]
specfit[icn] = tempin/npts/npts
nfig = int(sp.ceil(Nt*Nloc/6.0))
imcount = 0
for i_fig in range(nfig):
lines = [None]*3
labels = [None]*3
(figmplf, axmat) = plt.subplots(2, 3,figsize=(16, 12), facecolor='w')
axvec = axmat.flatten()
for iax,ax in enumerate(axvec):
if imcount>=Nt*Nloc:
break
iloc = int(sp.floor(imcount/Nt))
itime = int(imcount-(iloc*Nt))
maxvec = []
if fitdisp:
curfitspec = specfit[iloc,itime]
rcsfit = curfitspec.sum()
(taufit,acffit) = spect2acf(omegfit,curfitspec)
guess_acffit = sp.dot(amb_dict['WttMatrix'],acffit)
guess_acffit = guess_acffit*rcsfit/guess_acffit[0].real
spec_intermfit = scfft.fftshift(scfft.fft(guess_acffit,n=npts))
lines[1]= ax.plot(omeg*1e-3,spec_intermfit.real,label='Fitted Spectrum',linewidth=5)[0]
labels[1] = 'Fitted Spectrum'
if indisp:
# apply ambiguity function to spectrum
curin = specin[iloc,itime]
rcs = curin.real.sum()
(tau,acf) = spect2acf(omeg,curin)
guess_acf = sp.dot(amb_dict['WttMatrix'],acf)
guess_acf = guess_acf*rcs/guess_acf[0].real
# fit to spectrums
spec_interm = scfft.fftshift(scfft.fft(guess_acf,n=npts))
maxvec.append(spec_interm.real.max())
lines[0]= ax.plot(omeg*1e-3,spec_interm.real,label='Input',linewidth=5)[0]
labels[0] = 'Input Spectrum With Ambiguity Applied'
if acfdisp:
lines[2]=ax.plot(omeg*1e-3,specout[iloc,itime].real,label='Output',linewidth=5)[0]
labels[2] = 'Estimated Spectrum'
maxvec.append(specout[iloc,itime].real.max())
ax.set_xlabel('f in kHz')
ax.set_ylabel('Amp')
ax.set_title('Location {0}, Time {1}'.format(coords[iloc],times[itime]))
ax.set_ylim(0.0,max(maxvec)*1)
ax.set_xlim([-fs*5e-4,fs*5e-4])
imcount=imcount+1
figmplf.suptitle(suptitle, fontsize=20)
if None in labels:
labels.remove(None)
lines.remove(None)
plt.figlegend( lines, labels, loc = 'lower center', ncol=5, labelspacing=0. )
fname= filetemplate+'_{0:0>3}.png'.format(i_fig)
plt.savefig(fname)
plt.close(figmplf)
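# A minimal usage sketch (hypothetical coordinates in km and paths; maindir
# must contain the 'Spectrums', 'ACF' and 'Fitted' outputs):
#     coords = np.array([[0., 0., 250.]])
#     plotspecs(coords, np.array([60.]), 'sim.ini', 'testsimdir',
#               filetemplate='spec')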
def plotacfs(coords,times,configfile,maindir,cartcoordsys = True, indisp=True,acfdisp= True,
fitdisp=True, filetemplate='acf',suptitle = 'ACF Comparison',invacf=''):
""" This will create a set of images that compare the input ISR acf to the
output ISR acfs from the simulator.
Inputs
coords - An Nx3 numpy array that holds the coordinates of the desired points.
times - A numpy list of times in seconds.
configfile - The name of the configuration file used.
cartcoordsys - (default True)A bool, if true then the coordinates are given in cartisian if
false then it is assumed that the coords are given in sphereical coordinates.
specsfilename - (default None) The name of the file holding the input spectrum.
acfname - (default None) The name of the file holding the estimated ACFs.
filetemplate (default 'spec') This is the beginning string used to save the images.
"""
# indisp = specsfilename is not None
# acfdisp = acfname is not None
maindir=Path(maindir).expanduser()
sns.set_style("whitegrid")
sns.set_context("notebook")
acfname = maindir.joinpath('ACF','00lags.h5')
ffit = maindir.joinpath('Fitted','fitteddata.h5')
specsfiledir = maindir.joinpath('Spectrums')
(sensdict,simparams) = readconfigfile(configfile)
simdtype = simparams['dtype']
npts = simparams['numpoints']*3.0
amb_dict = simparams['amb_dict']
if sp.ndim(coords)==1:
coords = coords[sp.newaxis,:]
Nt = len(times)
Nloc = coords.shape[0]
sns.set_style("whitegrid")
sns.set_context("notebook")
pulse = simparams['Pulse']
ts = sensdict['t_s']
tau1 = sp.arange(pulse.shape[-1])*ts
if indisp:
dirlist = [i.name for i in specsfiledir.glob('*.h5')]
timelist = sp.array([float(i.split()[0]) for i in dirlist])
for itn,itime in enumerate(times):
filear = sp.argwhere(timelist>=itime)
if len(filear)==0:
filenum = len(timelist)-1
else:
filenum = filear[0][0]
specsfilename = specsfiledir.joinpath(dirlist[filenum])
Ionoin = IonoContainer.readh5(str(specsfilename))
if itn==0:
specin = sp.zeros((Nloc,Nt,Ionoin.Param_List.shape[-1])).astype(Ionoin.Param_List.dtype)
omeg = Ionoin.Param_Names
npts = Ionoin.Param_List.shape[-1]
for icn, ic in enumerate(coords):
if cartcoordsys:
tempin = Ionoin.getclosest(ic,times)[0]
else:
tempin = Ionoin.getclosestsphere(ic,times)[0]
# if sp.ndim(tempin)==1:
# tempin = tempin[sp.newaxis,:]
specin[icn,itn] = tempin[0,:]
if acfdisp:
Ionoacf = IonoContainer.readh5(str(acfname))
ACFin = sp.zeros((Nloc,Nt,Ionoacf.Param_List.shape[-1])).astype(Ionoacf.Param_List.dtype)
omeg = sp.arange(-sp.ceil((npts+1)/2),sp.floor((npts+1)/2))/ts/npts
for icn, ic in enumerate(coords):
if cartcoordsys:
tempin = Ionoacf.getclosest(ic,times)[0]
else:
tempin = Ionoacf.getclosestsphere(ic,times)[0]
if sp.ndim(tempin)==1:
tempin = tempin[sp.newaxis,:]
ACFin[icn] = tempin
# Determine the inverse ACF stuff
if len(invacf)==0:
invacfbool = False
else:
invacfbool = True
invfile=maindir.joinpath('ACFInv','00lags'+invacf+'.h5')
Ionoacfinv=IonoContainer.readh5(str(invfile))
ACFinv = sp.zeros((Nloc,Nt,Ionoacfinv.Param_List.shape[-1])).astype(Ionoacfinv.Param_List.dtype)
for icn, ic in enumerate(coords):
if cartcoordsys:
tempin = Ionoacfinv.getclosest(ic,times)[0]
else:
tempin = Ionoacfinv.getclosestsphere(ic,times)[0]
if sp.ndim(tempin)==1:
tempin = tempin[sp.newaxis,:]
ACFinv[icn] = tempin
if fitdisp:
Ionofit = IonoContainer.readh5(str(ffit))
(omegfit,outspecsfit) = ISRspecmakeout(Ionofit.Param_List,sensdict['fc'],
sensdict['fs'], simparams['species'],
npts)
Ionofit.Param_List = outspecsfit
Ionofit.Param_Names = omegfit
specfit = sp.zeros((Nloc,Nt,npts))
for icn, ic in enumerate(coords):
if cartcoordsys:
tempin = Ionofit.getclosest(ic,times)[0]
else:
tempin = Ionofit.getclosestsphere(ic,times)[0]
if sp.ndim(tempin)==1:
tempin = tempin[sp.newaxis,:]
specfit[icn] = tempin/npts/npts
nfig = int(sp.ceil(Nt*Nloc/3.))
imcount = 0
for i_fig in range(nfig):
lines = [None]*4
labels = [None]*4
lines_im = [None]*4
labels_im = [None]*4
(figmplf, axmat) = plt.subplots(3, 2,figsize=(16, 12), facecolor='w')
for ax in axmat:
if imcount>=Nt*Nloc:
break
iloc = int(sp.floor(imcount/Nt))
itime = int(imcount-(iloc*Nt))
maxvec = []
minvec = []
if indisp:
                # apply ambiguity function to spectrum
curin = specin[iloc,itime]
(tau,acf) = spect2acf(omeg,curin)
acf1 = scfft.ifftshift(acf)[:len(pulse)]*len(curin)
rcs = acf1[0].real
guess_acf = sp.dot(amb_dict['WttMatrix'],acf)
guess_acf = guess_acf*rcs/guess_acf[0].real
# fit to spectrums
maxvec.append(guess_acf.real.max())
maxvec.append(guess_acf.imag.max())
minvec.append(acf1.real.min())
minvec.append(acf1.imag.min())
lines[0]= ax[0].plot(tau1*1e6,guess_acf.real,label='Input',linewidth=5)[0]
labels[0] = 'Input ACF With Ambiguity Applied'
lines_im[0]= ax[1].plot(tau1*1e6,guess_acf.imag,label='Input',linewidth=5)[0]
labels_im[0] = 'Input ACF With Ambiguity Applied'
if fitdisp:
curinfit = specfit[iloc,itime]
(taufit,acffit) = spect2acf(omegfit,curinfit)
rcsfit=curinfit.sum()
guess_acffit = sp.dot(amb_dict['WttMatrix'],acffit)
guess_acffit = guess_acffit*rcsfit/guess_acffit[0].real
lines[1]= ax[0].plot(tau1*1e6,guess_acffit.real,label='Input',linewidth=5)[0]
labels[1] = 'Fitted ACF'
lines_im[1]= ax[1].plot(tau1*1e6,guess_acffit.imag,label='Input',linewidth=5)[0]
labels_im[1] = 'Fitted ACF'
if acfdisp:
lines[2]=ax[0].plot(tau1*1e6,ACFin[iloc,itime].real,label='Output',linewidth=5)[0]
labels[2] = 'Estimated ACF'
lines_im[2]=ax[1].plot(tau1*1e6,ACFin[iloc,itime].imag,label='Output',linewidth=5)[0]
labels_im[2] = 'Estimated ACF'
maxvec.append(ACFin[iloc,itime].real.max())
maxvec.append(ACFin[iloc,itime].imag.max())
minvec.append(ACFin[iloc,itime].real.min())
minvec.append(ACFin[iloc,itime].imag.min())
if invacfbool:
lines[3]=ax[0].plot(tau1*1e6,ACFinv[iloc,itime].real,label='Output',linewidth=5)[0]
labels[3] = 'Reconstructed ACF'
lines_im[3]=ax[1].plot(tau1*1e6,ACFinv[iloc,itime].imag,label='Output',linewidth=5)[0]
labels_im[3] = 'Reconstructed ACF'
ax[0].set_xlabel(r'$\tau$ in $\mu$s')
ax[0].set_ylabel('Amp')
ax[0].set_title('Real Part')# Location {0}, Time {1}'.format(coords[iloc],times[itime]))
ax[0].set_ylim(min(minvec),max(maxvec)*1)
ax[0].set_xlim([tau1.min()*1e6,tau1.max()*1e6])
ax[1].set_xlabel(r'$\tau$ in $\mu$s')
ax[1].set_ylabel('Amp')
ax[1].set_title('Imag Part')# Location {0}, Time {1}'.format(coords[iloc],times[itime]))
ax[1].set_ylim(min(minvec),max(maxvec)*1)
ax[1].set_xlim([tau1.min()*1e6,tau1.max()*1e6])
imcount=imcount+1
figmplf.suptitle(suptitle, fontsize=20)
if None in labels:
labels.remove(None)
lines.remove(None)
plt.figlegend( lines, labels, loc = 'lower center', ncol=5, labelspacing=0. )
fname= filetemplate+'_{0:0>3}.png'.format(i_fig)
plt.savefig(fname,dpi=300)
plt.close(figmplf)
def plotspecsgen(timeomeg,speclist,needtrans,specnames=None,filename='specs.png',n=None):
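    """ Plots a set of spectra (or ACFs that are first transformed to spectra) in a single figure.
    Inputs (descriptions inferred from the code below)
    timeomeg - The lag-time or frequency basis, either a single array shared by all
        entries or a list holding one basis per entry of speclist.
    speclist - A list of spectra (or ACFs) to be plotted.
    needtrans - A list of bools; if true the corresponding entry and its basis are
        treated as an ACF and transformed to a spectrum with acf2spect before plotting.
    specnames - (default None) The labels used in the legend.
    filename - (default 'specs.png') The name of the saved figure.
    n - (default None) The FFT length passed to acf2spect.
    """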
fig1 = plt.figure()
sns.set_style("whitegrid")
sns.set_context("notebook")
lines = []
if specnames is None:
specnames = ['Spec {0}'.format(i) for i in range(len(speclist))]
labels = specnames
xlims = [sp.Inf,-sp.Inf]
ylims = [sp.Inf,-sp.Inf]
for ispecn,ispec in enumerate(speclist):
if type(timeomeg)==list:
curbasis = timeomeg[ispecn]
else:
curbasis=timeomeg
if needtrans[ispecn]:
curbasis,ispec= acf2spect(curbasis,ispec,n=n)
lines.append(plt.plot(curbasis*1e-3, ispec.real, linewidth=5)[0])
xlims = [min(xlims[0], min(curbasis)*1e-3), max(xlims[1], max(curbasis)*1e-3)]
ylims = [min(ylims[0], min(ispec.real)), max(ylims[1], max(ispec.real))]
plt.xlabel('f in kHz')
plt.ylabel('Amp')
plt.title('Output Spectrums')
plt.xlim(xlims)
plt.ylim(ylims)
plt.legend(lines, labels)
plt.savefig(filename)
plt.close(fig1)
def analysisdump(maindir,configfile,suptitle=None, params = ['Ne','Nepow','Te','Ti','Vi']):
""" This function will perform all of the plotting functions in this module
given the main directory that all of the files live.
Inputs
maindir - The directory for the simulation.
configfile - The name of the configuration file used.
suptitle - The supertitle used on the files.
"""
maindir = Path(maindir)
plotdir = maindir.joinpath('AnalysisPlots')
if not plotdir.is_dir():
plotdir.mkdir()
#plot spectrums
filetemplate1 = str(maindir.joinpath('AnalysisPlots', 'Spec'))
filetemplate3 = str(maindir.joinpath('AnalysisPlots', 'ACF'))
filetemplate4 = str(maindir.joinpath('AnalysisPlots', 'AltvTime'))
(sensdict, simparams) = readconfigfile(configfile)
angles = simparams['angles']
ang_data = sp.array([[iout[0], iout[1]] for iout in angles])
if not sensdict['Name'].lower() in ['risr', 'pfisr']:
ang_data_temp = ang_data.copy()
beamlistlist = sp.array(simparams['outangles']).astype(int)
ang_data = sp.array([ang_data_temp[i].mean(axis=0) for i in beamlistlist])
zenang = ang_data[sp.argmax(ang_data[:, 1])]
rnggates = simparams['Rangegatesfinal']
rngchoices = sp.linspace(sp.amin(rnggates), sp.amax(rnggates), 4)
angtile = sp.tile(zenang, (len(rngchoices), 1))
coords = sp.column_stack((sp.transpose(rngchoices), angtile))
times = simparams['Timevec']
filetemplate2 = str(maindir.joinpath('AnalysisPlots', 'Params'))
if simparams['Pulsetype'].lower() == 'barker':
params = ['Ne']
if suptitle is None:
plotbeamparametersv2(times, configfile, maindir, params=params,
filetemplate=filetemplate2, werrors=True)
else:
plotbeamparametersv2(times, configfile, maindir, params=params,
filetemplate=filetemplate2, suptitle=suptitle,
werrors=True)
else:
if suptitle is None:
plotspecs(coords, times, configfile, maindir, cartcoordsys=False,
filetemplate=filetemplate1)
plotacfs(coords, times, configfile, maindir, cartcoordsys=False,
filetemplate=filetemplate3)
plotbeamparametersv2(times, configfile, maindir, params=params,
filetemplate=filetemplate2, werrors=True)
plotbeamparametersv2(times, configfile, maindir, params=params,
filetemplate=filetemplate2+'Noerrors', werrors=False)
beamvstime(configfile, maindir, params=params, filetemplate=filetemplate4)
else:
plotspecs(coords, times, configfile, maindir, cartcoordsys=False,
filetemplate=filetemplate1, suptitle=suptitle)
plotacfs(coords, times, configfile, maindir, cartcoordsys=False,
filetemplate=filetemplate3, suptitle=suptitle)
plotbeamparametersv2(times, configfile, maindir, params=params,
filetemplate=filetemplate2, suptitle=suptitle, werrors=True)
plotbeamparametersv2(times, configfile, maindir, params=params,
filetemplate=filetemplate2+'Noerrors', suptitle=suptitle,
werrors=False)
beamvstime(configfile, maindir, params=params, filetemplate=filetemplate4,
suptitle=suptitle)
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/api/sankey_demo_basics.py | 12 | 3421 | """Demonstrate the Sankey class by producing three basic diagrams.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.sankey import Sankey
# Example 1 -- Mostly defaults
# This demonstrates how to create a simple diagram by implicitly calling the
# Sankey.add() method and by appending finish() to the call to the class.
Sankey(flows=[0.25, 0.15, 0.60, -0.20, -0.15, -0.05, -0.50, -0.10],
labels=['', '', '', 'First', 'Second', 'Third', 'Fourth', 'Fifth'],
orientations=[-1, 1, 0, 1, 1, 1, 0, -1]).finish()
plt.title("The default settings produce a diagram like this.")
# Notice:
# 1. Axes weren't provided when Sankey() was instantiated, so they were
# created automatically.
# 2. The scale argument wasn't necessary since the data was already
# normalized.
# 3. By default, the lengths of the paths are justified.
# Example 2
# This demonstrates:
# 1. Setting one path longer than the others
# 2. Placing a label in the middle of the diagram
# 3. Using the scale argument to normalize the flows
# 4. Implicitly passing keyword arguments to PathPatch()
# 5. Changing the angle of the arrow heads
# 6. Changing the offset between the tips of the paths and their labels
# 7. Formatting the numbers in the path labels and the associated unit
# 8. Changing the appearance of the patch and the labels after the figure is
# created
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[],
title="Flow Diagram of a Widget")
sankey = Sankey(ax=ax, scale=0.01, offset=0.2, head_angle=180,
format='%.0f', unit='%')
sankey.add(flows=[25, 0, 60, -10, -20, -5, -15, -10, -40],
labels = ['', '', '', 'First', 'Second', 'Third', 'Fourth',
'Fifth', 'Hurray!'],
orientations=[-1, 1, 0, 1, 1, 1, -1, -1, 0],
pathlengths = [0.25, 0.25, 0.25, 0.25, 0.25, 0.6, 0.25, 0.25,
0.25],
patchlabel="Widget\nA",
alpha=0.2, lw=2.0) # Arguments to matplotlib.patches.PathPatch()
diagrams = sankey.finish()
diagrams[0].patch.set_facecolor('#37c959')
diagrams[0].texts[-1].set_color('r')
diagrams[0].text.set_fontweight('bold')
# Notice:
# 1. Since the sum of the flows is nonzero, the width of the trunk isn't
# uniform. If verbose.level is helpful (in matplotlibrc), a message is
# given in the terminal window.
# 2. The second flow doesn't appear because its value is zero. Again, if
# verbose.level is helpful, a message is given in the terminal window.
# Example 3
# This demonstrates:
# 1. Connecting two systems
# 2. Turning off the labels of the quantities
# 3. Adding a legend
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[], title="Two Systems")
flows = [0.25, 0.15, 0.60, -0.10, -0.05, -0.25, -0.15, -0.10, -0.35]
sankey = Sankey(ax=ax, unit=None)
sankey.add(flows=flows, label='one',
orientations=[-1, 1, 0, 1, 1, 1, -1, -1, 0])
sankey.add(flows=[-0.25, 0.15, 0.1], fc='#37c959', label='two',
orientations=[-1, -1, -1], prior=0, connect=(0, 0))
diagrams = sankey.finish()
diagrams[-1].patch.set_hatch('/')
plt.legend(loc='best')
# Notice that only one connection is specified, but the systems form a
# circuit since: (1) the lengths of the paths are justified and (2) the
# orientation and ordering of the flows is mirrored.
plt.show()
| gpl-2.0 |
CompPhysics/ComputationalPhysics2 | doc/src/NeuralNet/figures/plotEnergies.py | 10 | 1487 | import sys
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
try:
dataFileName = sys.argv[1]
except IndexError:
print("USAGE: python plotEnergies.py 'filename'")
sys.exit(0)
HFEnergy3 = 3.161921401722216
HFEnergy6 = 20.71924844033019
numParticles = \
int(dataFileName[dataFileName.find('N')+1:dataFileName.find('E')-1])
hfenergyFound = False
if (numParticles == 2):
HFEnergy = 3.161921401722216
hfenergyFound = True
elif (numParticles == 6):
HFEnergy = 20.71924844033019
hfenergyFound = True
else:
hfenergyFound = False
data = np.loadtxt(dataFileName, dtype=np.float64)
data[:,1] = np.sqrt(data[:,1])
n = len(data[:,0])
x = np.arange(0,n)
fig = plt.figure()
if (hfenergyFound):
yline = np.zeros(n)
yline.fill(HFEnergy)
plt.plot(x, yline, 'r--', label="HF Energy")
msize = 1.0
ax = fig.add_subplot(111)
plt.errorbar(x, data[:,0], yerr=data[:,1], fmt='bo', markersize=msize, label="VMC Energy")
plt.fill_between(x, data[:,0]-data[:,1], data[:,0]+data[:,1])
plt.xlim(0,n)
plt.xlabel('Iteration')
plt.ylabel('$E_0[a.u]$')
plt.legend(loc='best')
minSub = 80
maxSub = 120
inset_axes(ax, width="50%", height=1.0, loc='right')
plt.errorbar(x[minSub:maxSub], data[minSub:maxSub,0],
yerr=data[minSub:maxSub,1], fmt='bo', markersize=msize, label="VMC "
"Energy")
plt.plot(x[minSub:maxSub], yline[minSub:maxSub], 'r--', label="HF Energy")
plt.show()
| cc0-1.0 |
bikong2/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
willgrass/pandas | pandas/stats/interface.py | 1 | 3692 | from pandas.core.api import Series
from pandas.stats.ols import OLS, MovingOLS
from pandas.stats.plm import PanelOLS, MovingPanelOLS, NonPooledPanelOLS
import pandas.stats.common as common
def ols(**kwargs):
"""Returns the appropriate OLS object depending on whether you need
simple or panel OLS, and a full-sample or rolling/expanding OLS.
Parameters
----------
y: Series for simple OLS. DataFrame for panel OLS.
x: Series, DataFrame, or dict of Series for simple OLS.
Dict of DataFrame for panel OLS.
intercept: bool
True if you want an intercept. Defaults to True.
nw_lags: None or int
Number of Newey-West lags. Defaults to None.
nw_overlap: bool
Whether there are overlaps in the NW lags. Defaults to False.
window_type: int
FULL_SAMPLE, ROLLING, EXPANDING. FULL_SAMPLE by default.
window: int
size of window (for rolling/expanding OLS)
Panel OLS options:
pool: bool
Whether to run pooled panel regression. Defaults to true.
weights: DataFrame
Weight for each observation. The weights are not normalized;
they're multiplied directly by each observation.
entity_effects: bool
Whether to account for entity fixed effects. Defaults to false.
time_effects: bool
Whether to account for time fixed effects. Defaults to false.
x_effects: list
List of x's to account for fixed effects. Defaults to none.
dropped_dummies: dict
Key is the name of the variable for the fixed effect.
Value is the value of that variable for which we drop the dummy.
For entity fixed effects, key equals 'entity'.
By default, the first dummy is dropped if no dummy is specified.
cluster: {'time', 'entity'}
cluster variances
Returns
-------
The appropriate OLS object, which allows you to obtain betas and various
statistics, such as std err, t-stat, etc.
Examples
--------
# Run simple OLS.
result = ols(y=y, x=x)
# Run rolling simple OLS with window of size 10.
result = ols(y=y, x=x, window_type=ROLLING, window=10)
print result.beta
result = ols(y=y, x=x, nw_lags=1)
# Set up LHS and RHS for data across all items
y = A
x = {'B' : B, 'C' : C}
# Run panel OLS.
result = ols(y=y, x=x)
# Run expanding panel OLS with window 10 and entity clustering.
result = ols(y=y, x=x, cluster=ENTITY, window_type=EXPANDING, window=10)
"""
try:
import scipy as _
except ImportError:
raise Exception('Must install SciPy to use OLS functionality')
pool = kwargs.get('pool')
if 'pool' in kwargs:
del kwargs['pool']
window_type = kwargs.get('window_type', common.FULL_SAMPLE)
window_type = common._get_window_type(window_type)
y = kwargs.get('y')
if window_type == common.FULL_SAMPLE:
for rolling_field in ('window_type', 'window', 'min_periods'):
if rolling_field in kwargs:
del kwargs[rolling_field]
if isinstance(y, Series):
klass = OLS
else:
if pool == False:
klass = NonPooledPanelOLS
else:
klass = PanelOLS
else:
if isinstance(y, Series):
klass = MovingOLS
else:
if pool == False:
klass = NonPooledPanelOLS
else:
klass = MovingPanelOLS
return klass(**kwargs)
| bsd-3-clause |
neuroidss/nupic.research | projects/capybara/supervised_baseline/v1_no_sequences/plot_results.py | 9 | 3714 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
import os
import pandas as pd
from sklearn.metrics import (classification_report, confusion_matrix,
accuracy_score)
from baseline_utils import predictions_vote
from plot_utils import (plot_confusion_matrix, plot_train_history,
plot_classification_report, plot_predictions)
if __name__ == '__main__':
# Path to CSV files (training history and predictions)
parser = argparse.ArgumentParser()
parser.add_argument('--vote_window', '-v', dest='vote_window',
type=int, default=11)
parser.add_argument('--input_dir', '-i', dest='input_dir',
type=str, default='results')
parser.add_argument('--output_dir', '-o', dest='output_dir',type=str,
default='plots')
options = parser.parse_args()
vote_window = options.vote_window
input_dir = options.input_dir
output_dir = options.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
train_history_path = os.path.join(input_dir, 'train_history.csv')
predictions_path = os.path.join(input_dir, 'predictions.csv')
# Training history
df = pd.read_csv(train_history_path)
epochs = range(len(df.epoch.values))
acc = df.acc.values
loss = df.loss.values
output_file = os.path.join(output_dir, 'train_history.html')
plot_train_history(epochs, acc, loss, output_file)
print 'Plot saved:', output_file
# Predictions
df = pd.read_csv(predictions_path)
t = df.t.values
X_values = df.scalar_value.values
y_true = df.y_true.values
y_pred = df.y_pred.values
if vote_window > 0:
y_pred = predictions_vote(y_pred, vote_window)
# Accuracy
acc = accuracy_score(y_true, y_pred)
print 'Accuracy on test set:', acc
label_list = sorted(df.y_true.unique())
# Plot normalized confusion matrix
cnf_matrix = confusion_matrix(y_true, y_pred)
output_file = os.path.join(output_dir, 'confusion_matrix.png')
_ = plot_confusion_matrix(cnf_matrix,
output_file,
classes=label_list,
normalize=True,
title='Confusion matrix (accuracy=%.2f)' % acc)
print 'Plot saved:', output_file
# Classification report (F1 score, etc.)
clf_report = classification_report(y_true, y_pred)
output_file = os.path.join(output_dir, 'classification_report.png')
plot_classification_report(clf_report, output_file)
print 'Plot saved:', output_file
# Plot predictions
output_file = os.path.join(output_dir, 'predictions.html')
title = 'Predictions (accuracy=%s)' % acc
plot_predictions(t, X_values, y_true, y_pred, output_file, title)
print 'Plot saved:', output_file
| agpl-3.0 |
gotomypc/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
benoitsteiner/tensorflow-xsmm | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 28 | 12795 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import sys
import argparse
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
# Give a folder path as an argument with '--log_dir' to save
# TensorBoard summaries. Default is a log folder in current directory.
current_path = os.path.dirname(os.path.realpath(sys.argv[0]))
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_dir',
type=str,
default=os.path.join(current_path, 'log'),
help='The log directory for TensorBoard summaries.')
FLAGS, unparsed = parser.parse_known_args()
# Create the directory for TensorBoard variables if there is not.
if not os.path.exists(FLAGS.log_dir):
os.makedirs(FLAGS.log_dir)
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename,
local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename +
'. Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - map of words(strings) to count of occurrences
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = build_dataset(
vocabulary, vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
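  """Generates one training batch for the skip-gram model.

  Sketch of the behavior (inferred from the code below): a window of
  2 * skip_window + 1 words slides over the data; for every center word,
  num_skips of its context words are sampled, giving `batch` (center word ids,
  shape (batch_size,)) and `labels` (context word ids, shape (batch_size, 1)).
  """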
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span) # pylint: disable=redefined-builtin
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0],
reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy, they don't affect calculation.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
with tf.name_scope('inputs'):
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
with tf.name_scope('embeddings'):
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
with tf.name_scope('weights'):
nce_weights = tf.Variable(
tf.truncated_normal(
[vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
with tf.name_scope('biases'):
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
# Explanation of the meaning of NCE loss:
# http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
with tf.name_scope('loss'):
loss = tf.reduce_mean(
tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Add the loss value as a scalar to summary.
tf.summary.scalar('loss', loss)
# Construct the SGD optimizer using a learning rate of 1.0.
with tf.name_scope('optimizer'):
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings,
valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Merge all summaries.
merged = tf.summary.merge_all()
# Add variable initializer.
init = tf.global_variables_initializer()
# Create a saver.
saver = tf.train.Saver()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# Open a writer to write summaries.
writer = tf.summary.FileWriter(FLAGS.log_dir, session.graph)
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(batch_size, num_skips,
skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# Define metadata variable.
run_metadata = tf.RunMetadata()
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
# Also, evaluate the merged op to get all summaries from the returned "summary" variable.
# Feed metadata variable to session for visualizing the graph in TensorBoard.
_, summary, loss_val = session.run(
[optimizer, merged, loss],
feed_dict=feed_dict,
run_metadata=run_metadata)
average_loss += loss_val
# Add returned summaries to writer in each step.
writer.add_summary(summary, step)
# Add metadata to visualize the graph for the last run.
if step == (num_steps - 1):
writer.add_run_metadata(run_metadata, 'step%d' % step)
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Write corresponding labels for the embeddings.
with open(FLAGS.log_dir + '/metadata.tsv', 'w') as f:
for i in xrange(vocabulary_size):
f.write(reverse_dictionary[i] + '\n')
# Save the model for checkpoints.
saver.save(session, os.path.join(FLAGS.log_dir, 'model.ckpt'))
# Create a configuration for visualizing embeddings with the labels in TensorBoard.
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = embeddings.name
embedding_conf.metadata_path = os.path.join(FLAGS.log_dir, 'metadata.tsv')
projector.visualize_embeddings(writer, config)
writer.close()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(
label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(
perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
| apache-2.0 |
MartinThoma/algorithms | Python/timing/password_hashing.py | 1 | 5049 | #!/usr/bin/env python
import random
import timeit
import numpy as np
from werkzeug.security import check_password_hash, generate_password_hash
random.seed(0)
def main():
str_gen = "import random;random.seed(0);string=''.join(random.choices('ABCDEFGHIJKLM', k=20));"
pw_gen = (
"from werkzeug.security import generate_password_hash, check_password_hash;"
)
string_20 = "".join(random.choices("ABCDEFGHIJKLM", k=20))
# string_2000 = ''.join(random.choices("ABCDEFGHIJKLM", k=2000))
# string_200000 = ''.join(random.choices("ABCDEFGHIJKLM", k=200000))
# string_200000000 = ''.join(random.choices("ABCDEFGHIJKLM", k=200000000))
functions = [
(
"generate_password_hash(string, method='pbkdf2:sha512:1',salt_length=8)",
"sha512, 1 iteration",
str_gen + pw_gen,
),
(
"generate_password_hash(string, method='pbkdf2:sha512:15000',salt_length=8)",
"sha512, 15000 iteration",
str_gen + pw_gen,
),
(
"generate_password_hash(string, method='pbkdf2:sha256:15000',salt_length=8)",
"sha256, 15000 iteration",
str_gen + pw_gen,
),
(
"generate_password_hash(string, method='pbkdf2:sha512:1000',salt_length=8)",
"sha512, 1000 iteration",
str_gen + pw_gen,
),
(
"generate_password_hash(string, method='pbkdf2:sha256:1000',salt_length=8)",
"sha256, 1000 iteration",
str_gen + pw_gen,
),
(
"generate_password_hash(string, method='pbkdf2:md5:15000',salt_length=8)",
"md5, 15000 iteration",
str_gen + pw_gen,
),
]
iter_list(
functions,
title="Password generation time",
outfile="password-generation-time.png",
)
functions = [
(
"check_password_hash(pwhash=hash, password=string)",
"sha512, 1 iteration",
str_gen
+ pw_gen
+ "hash=generate_password_hash(\"{}\", method='pbkdf2:sha512:1', salt_length=8)",
),
(
"check_password_hash(pwhash=hash, password=string)",
"sha512, 1000 iteration",
str_gen
+ pw_gen
+ "hash=generate_password_hash(\"{}\", method='pbkdf2:sha512:1000', salt_length=8)",
),
(
"check_password_hash(pwhash=hash, password=string)",
"sha256, 1000 iteration",
str_gen
+ pw_gen
+ "hash=generate_password_hash(\"{}\", method='pbkdf2:sha256:1000', salt_length=8)",
),
(
"check_password_hash(pwhash=hash, password=string)",
"sha512, 15000 iteration",
str_gen
+ pw_gen
+ "hash=generate_password_hash(\"{}\", method='pbkdf2:sha512:15000', salt_length=8)",
),
(
"check_password_hash(pwhash=hash, password=string)",
"sha256, 15000 iteration",
str_gen
+ pw_gen
+ "hash=generate_password_hash(\"{}\", method='pbkdf2:sha256:15000', salt_length=8)",
),
(
"check_password_hash(pwhash=hash, password=string)",
"md5, 15000 iteration",
str_gen
+ pw_gen
+ "hash=generate_password_hash(\"{}\", method='pbkdf2:md5:15000', salt_length=8)",
),
]
iter_list(
functions,
title="Password verification time",
outfile="password-verification-time.png",
)
def iter_list(functions, title, outfile):
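    """Times every (statement, label, setup) tuple in `functions` with timeit.repeat,
    prints min/mean/max for each entry and draws a boxplot to `outfile`.
    (Description inferred from the code below; the raw timeit durations are kept in
    milliseconds for the plot and reported in microseconds in the printout.)
    """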
duration_list = {}
for func, name, setup in functions:
durations = timeit.repeat(func, repeat=100, number=3, setup=setup)
duration_list[name] = list(np.array(durations) * 1000)
print(
"{func:<20}: "
"min: {min:5.1f}μs, mean: {mean:5.1f}μs, max: {max:6.1f}μs".format(
func=name,
min=min(durations) * 10 ** 6,
mean=np.mean(durations) * 10 ** 6,
max=max(durations) * 10 ** 6,
)
)
create_boxplot(title, duration_list, outfile=outfile)
def create_boxplot(title, duration_list, showfliers=False, outfile="out.png"):
import operator
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(num=None, figsize=(8, 4), dpi=300, facecolor="w", edgecolor="k")
sns.set(style="whitegrid")
sorted_keys, sorted_vals = zip(
*sorted(duration_list.items(), key=operator.itemgetter(0))
)
flierprops = dict(markerfacecolor="0.75", markersize=1, linestyle="none")
ax = sns.boxplot(
data=sorted_vals,
width=0.3,
orient="h",
flierprops=flierprops,
showfliers=showfliers,
)
ax.set(xlabel="Time in ms", ylabel="")
plt.yticks(plt.yticks()[0], sorted_keys)
ax.set_title(title)
plt.tight_layout()
plt.savefig(outfile)
if __name__ == "__main__":
main()
| mit |
msdogan/HydropowerProject | Pumped_Storage/Pumped_Storage.py | 1 | 12823 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 12 10:50:15 2016
@author: msdogan
"""
# This code optimizes pump-storage hydropower facility operations.
# Mustafa Dogan
### 02/22/2017
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
from scipy.optimize import differential_evolution
import pandas as pd
import seaborn as sns
sns.set_style('whitegrid')
# This part is all about data (hourly marginal price (wholesale) $/MWh)
##*****************************************************************************
# this function creates price-duration curves
def dur_curve(load, duration, time_period):
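    """Builds a price-duration (exceedance probability) curve from the price data.
    Inputs (descriptions inferred from the code below)
    load - A DataFrame with INTERVALSTARTTIME_GMT, INTERVALENDTIME_GMT, OPR_DT,
        OPR_HR and Price columns.
    duration - 'Monthly', 'Annual' or 'Daily'.
    time_period - e.g. 'Aug' for Monthly, '2016' for Annual, '2016-09-01' for Daily.
    Returns the price-duration DataFrame and the time-ordered prices, and also
    writes both to csv files.
    """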
data_raw, INTERVALSTARTTIME_GMT, INTERVALENDTIME_GMT, OPR_DT, OPR_HR = [],[],[],[],[]
if duration == 'Monthly':
c_month = months.index(time_period) + 1 # python starts from index 0
for i in range(len(load)):
if load.OPR_DT[i].split('-')[1] == c_month:
data_raw.append(load.Price[i])
INTERVALSTARTTIME_GMT.append(load.INTERVALSTARTTIME_GMT[i])
INTERVALENDTIME_GMT.append(load.INTERVALENDTIME_GMT[i])
OPR_DT.append(load.OPR_DT[i])
OPR_HR.append(load.OPR_HR[i])
elif duration == 'Annual':
for i in range(len(load)):
if load.OPR_DT[i].split('-')[0] == time_period: # Unit is $/MWh
data_raw.append(load.Price[i])
INTERVALSTARTTIME_GMT.append(load.INTERVALSTARTTIME_GMT[i])
INTERVALENDTIME_GMT.append(load.INTERVALENDTIME_GMT[i])
OPR_DT.append(load.OPR_DT[i])
OPR_HR.append(load.OPR_HR[i])
elif duration == 'Daily': # does not work for now
y,m,d = time_period.split("-") # year, month, day
for i in range(len(load)):
if load.OPR_DT[i].split('-')[0] == y:
if load.OPR_DT[i].split('-')[1] == m:
if load.OPR_DT[i].split('-')[2] == d:
data_raw.append(load.Price[i])
INTERVALSTARTTIME_GMT.append(load.INTERVALSTARTTIME_GMT[i])
INTERVALENDTIME_GMT.append(load.INTERVALENDTIME_GMT[i])
OPR_DT.append(load.OPR_DT[i])
OPR_HR.append(load.OPR_HR[i])
else:
print('please define correct duration and/or time period')
return
prc_data = [[],[],[],[],[]]
prc_data[0],prc_data[1],prc_data[2],prc_data[3],prc_data[4]=INTERVALSTARTTIME_GMT,INTERVALENDTIME_GMT,OPR_DT,OPR_HR,data_raw
prc_ordered = pd.DataFrame(np.array(prc_data).T, columns = columns).sort_values(['INTERVALSTARTTIME_GMT'])
s_name = 'price_ordered_' + str(time_period) + '.csv'
prc_ordered.to_csv(s_name, index=False, header=True)
# after determining what duration and time period to use, create price-duration data
data = np.sort(data_raw) # sort data
rank = sp.stats.rankdata(data, method='average') # calculate the rank
rank = rank[::-1]
prob = [100*(rank[i]/(len(data)+1)) for i in range(len(data))] # frequency data
# save price-duration data
col = ['Price', 'Frequency']
pdur = [[],[]]
pdur[0],pdur[1] = data, prob
pdur = np.array(pdur)
price_duration = pd.DataFrame(pdur.T, columns = col, dtype = 'float')
s_name = 'price_duration_' + str(time_period) + '.csv'
price_duration.to_csv(s_name)
return price_duration, prc_ordered
# Load Price data from OASIS (CAISO) http://oasis.caiso.com/mrioasis/logon.do
name = 'PRC_HASP_LMP.csv'
df = pd.read_csv(name, parse_dates=True) # read data and sort by time (gmt)
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep','Oct', 'Nov', 'Dec']
P = [[],[],[],[],[]] # empty list to store required data
columns = ['INTERVALSTARTTIME_GMT', 'INTERVALENDTIME_GMT', 'OPR_DT', 'OPR_HR', 'Price'] # headers for data frame
# We are only interested in , start time, end time and LMP
for i in range(len(df)):
if df.LMP_TYPE[i] == "LMP": # Unit is $/MWh
P[0].append(df.INTERVALSTARTTIME_GMT[i]) # GMT start
P[1].append(df.INTERVALENDTIME_GMT[i]) # GMT end
P[2].append(df.OPR_DT[i]) # OPR Date
P[3].append(df.OPR_HR[i]) # OPR hour
P[4].append(df.MW[i]) # price $/MWh
P = np.array(P) # convert list to numpy array
price = pd.DataFrame(P.T, columns = columns) # convert list to data frame
# Examples of 'dur_curve' function use
# Annual Duration and Time
#duration = 'Annual'
#time = '2016'
# Monthly Duration and Time
# duration = 'Monthly'
# time = 'Aug'
# Daily Duration and Time
duration = 'Daily'
time = '2016-09-01'
price_duration, prc_ordered = dur_curve(price, duration, time)
##*****************************************************************************
# Equations
# power_hydro (Watt) = e * g (m/s2) * rho (kg/m3) * Q (m3/s) * head (m)
# power_pump (Watt) = 1/e * g (m/s2) * rho (kg/m3) * Q (m3/s) * head (m)
# generation (Wh) = power (Watt) * hour (h) = 1/(10**6) (MWh)
# revenue ($) = generation (MWh) * price ($/MWh)
# parameters
e_g = 0.90 # generation efficiency
e_p = 0.85 # pumping efficiency
g = 9.81 # m/s2 - acceleration of gravity
rho = 1000 # kg/m3 - density of water
Q_g = 100 # m3/s - water flow for turbine
Q_p = 100 # m3/s - water flow for pumping
head_g = 100 # m - generating head
head_p = 100 # m - pumping head
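# Quick sanity check of the equations above with the parameters just defined
# (illustrative only; these two variables are not used by the optimization below):
power_gen_MW = e_g * g * rho * Q_g * head_g / (10**6)        # ~88.3 MW while generating
power_pump_MW = (1 / e_p) * g * rho * Q_p * head_p / (10**6) # ~115.4 MW while pumping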
# objective function to maximize - continuous function
def obj_func_cont(xx, e_g, e_p, g, rho, Q_g, Q_p, head_g, head_p, optimizing = True):
H_T = int(price_duration.Frequency.max()) # total duration (100%)
x1 = np.arange(0,xx)
y1 = f(x1)
x2 = np.arange(H_T-xx,H_T)
y2 = f(x2)
Power_Revenue = np.trapz(y1, x1, dx=0.1, axis = -1)*e_g*rho*g*Q_g*head_g/(10**6)
Pumping_Cost = np.trapz(y2, x2, dx=0.1, axis = -1)/e_p*rho*g*Q_p*head_p/(10**6)
z = Power_Revenue - Pumping_Cost # profit
return -z if optimizing else z
# objective function to maximize - discrete
def obj_func_disc(xx, e_g, e_p, g, rho, Q_g, Q_p, head_g, head_p, optimizing = True):
dH = 0.1 # discretization level
H_T = int(price_duration.Frequency.max()) # total duration (100%)
Power_Revenue = 0
for gen_H in np.arange(0,xx,dH):
Power_Revenue += f(gen_H)*e_g*rho*g*Q_g*head_g*dH/(10**6)
Pumping_Cost = 0
for pump_H in np.arange(H_T-xx,H_T,dH):
Pumping_Cost += f(pump_H)/e_p*rho*g*Q_p*head_p*dH/(10**6)
z = Power_Revenue - Pumping_Cost # profit
return -z if optimizing else z
## objective function to maximize - discrete, no curve fitting
def obj_func_disc_nofit(xx, e_g, e_p, g, rho, Q_g, Q_p, head_g, head_p, optimizing = True):
H_T = int(price_duration.Frequency.max()) # total duration (100%)
prc_g, prc_p, freq_g, freq_p = [],[],[],[]
for i,x in enumerate(price_duration.Frequency):
if x < xx: # Power Generation price and duration
prc_g.append(price_duration.Price[i]), freq_g.append(x)
if H_T - xx < x < H_T: # Pumping price and duration
prc_p.append(price_duration.Price[i]), freq_p.append(x)
prc_g = np.array(prc_g) # generation price
prc_p = np.array(prc_p) # pumping price
freq_g = np.array(freq_g) # generation duration
freq_p = np.array(freq_p) # pumping duration
# Use numerical integration to integrate (Trapezoidal rule)
Power_Revenue = np.trapz(prc_g, freq_g, dx=0.1, axis = -1)*e_g*rho*g*Q_g*head_g/(10**6)
Pumping_Cost = np.trapz(prc_p, freq_p, dx=0.1, axis = -1)/e_p*rho*g*Q_p*head_p/(10**6)
z = Power_Revenue - Pumping_Cost # profit
return z if optimizing else -z
# fit a curve
z = np.polyfit(price_duration.Frequency, price_duration.Price, 9)
f = np.poly1d(z)
x_new = np.linspace(0, price_duration.Frequency.max(), 50)
y_new = f(x_new)
# normal distribution (cumulative, exceedance)
y_norm = np.linspace(0, price_duration.Price.max(), 50)
x_norm = sp.stats.norm(price_duration.Price.mean(), price_duration.Price.std()).sf(y_norm)*100 # survival function
# Reduced Analytical solution without integration: e_g * e_p = P(1-H_G)/P(H_G)
#for i,item in enumerate(price_duration.Frequency):
# if (item + (price_duration.Frequency.max()-item)) <= 100: # total proability cannot exceed 1 (100%)
# if round(f(price_duration.Frequency.max()-item)/f(item),2) == round(e_g * e_p,2):
# H_G = item
# print(H_G)
# differential evolution
result = differential_evolution(obj_func_disc_nofit, bounds=[(0,100)], args = (e_g, e_p, g, rho, Q_g, Q_p, head_g, head_p), maxiter=1000, seed = 1)
H_G = result.x
# print price-duration data and curve fitting
plt.scatter(price_duration.Frequency, price_duration.Price)
plt.xlim([0,price_duration.Frequency.max()])
plt.ylim([0,price_duration.Price.max()])
plt.plot(x_norm, y_norm, 'cyan', label = 'Normal Dist.', linewidth=2) # normal dist. plot
plt.plot(x_new, y_new, 'r', label = 'Curve fit') # curve fit plot
plt.ylabel('15 min price $/MWh', fontsize = 14)
plt.xlabel('duration %', fontsize = 14)
plt.title('Optimal Generating and Pumping Hours for ' + str(time), fontsize = 16)
plt.grid(False)
plt.axvline(x=H_G, linewidth=2, color='k', label = 'Generate Power', linestyle = 'dashed')
plt.axvline(x=price_duration.Frequency.max()-H_G, linewidth=2, color='b', label = 'Pump', linestyle = 'dashed')
plt.legend(fontsize = 12, loc=9)
plt.text(H_G-3,price_duration.Price.min()+(price_duration.Price.max()+price_duration.Price.min())/4, 'Generating Hours, >= ' + str(round(f(H_G),2)) + ' $/MWh', color = 'k', rotation = 'vertical')
plt.text(price_duration.Frequency.max()-H_G+1,price_duration.Price.min()+(price_duration.Price.max()+price_duration.Price.min())/4, 'Pumping Hours, <= ' + str(round(f(price_duration.Frequency.max()-H_G),2)) + ' $/MWh', color = 'b', rotation = 'vertical')
plt.text(5,5,'Generate', fontsize = 15, color = 'k')
plt.text(45,5,'Stop', fontsize = 15, color = 'r')
plt.text(83,5,'Pump', fontsize = 15, color = 'b')
plt.savefig('figure_pd_'+str(time)+'.pdf', transparent=True)
plt.show()
# enumeration
enum_h = np.arange(price_duration.Frequency.min(), price_duration.Frequency.max(), 1)
simulation =np.zeros(len(enum_h))
for i,item in enumerate(enum_h):
simulation[i] = obj_func_cont(item, e_g, e_p, g, rho, Q_g, Q_p, head_g, head_p, optimizing = False)
index = np.where(simulation == simulation.max())[0]
plt.plot(enum_h, simulation, label = 'Net Profit (Gen-Pump)')
plt.axhline(y=0, linewidth=0.5, color='k')
plt.annotate('max', xy=(enum_h[index],simulation.max()), xytext=(enum_h[index],simulation.max()), arrowprops=dict(facecolor='black', shrink=0.5), fontsize = 12)
plt.title('Enumeration Line for ' + str(time), fontsize = 16)
plt.xlabel('duration %', fontsize = 14)
plt.ylabel('profit $/hour', fontsize = 14)
plt.legend(fontsize = 12, loc=1)
plt.grid(False)
plt.savefig('figure_enum_'+str(time)+'.pdf', transparent=True)
plt.show()
prc = np.array(prc_ordered.Price)
gen_prc = np.zeros(len(prc)) # generating price time-series
pump_prc = np.zeros(len(prc)) # pumping price time-series
plot_gen_prc = np.zeros(len(prc)) # this is only for plotting purposes
for i,item in enumerate(prc):
if float(item) >= f(H_G):
gen_prc[i] = item # store generating price
plot_gen_prc[i] = float(max(prc))
if float(item) <= f(price_duration.Frequency.max()-H_G):
pump_prc[i] = item # store pumping price
# # plot time-series data
plot_prc = [prc[i] for i in range(len(prc_ordered.Price))]
plt.bar(range(len(pump_prc)), pump_prc, align='center', color='b', label = 'Pumping Price', alpha=0.25)
plt.bar(range(len(plot_gen_prc)), plot_gen_prc, align='center', color='k', label = 'Generating Price', alpha=0.25)
plt.bar(range(len(gen_prc)), gen_prc, align='center', linewidth=0, color='white', alpha=1)
plt.plot(plot_prc, linewidth=1.5, color='r', label = 'Hourly Price') # use "marker = 'o'" to see points
plt.axhline(y=f(H_G), linewidth=2, color='k', label = 'Generate Power', linestyle = 'dashed')
plt.axhline(y=f(price_duration.Frequency.max()-H_G), linewidth=2, color='b', label = 'Pump', linestyle = 'dashed')
plt.legend(fontsize = 12, loc=9)
plt.xlim([0,len(prc_ordered.Price)])
plt.ylim([0,float(max(prc))])
plt.grid(False)
plt.title('15 Min Price Time-series for ' + str(time), fontsize = 16)
plt.ylabel('15 Min price $/MWh', fontsize = 14)
plt.xlabel('15 min', fontsize = 14)
plt.text(5,f(H_G)+1,'Generate', fontsize = 15, color = 'k')
plt.text(5,(f(H_G)-f(price_duration.Frequency.max()-H_G))/2+f(price_duration.Frequency.max()-H_G),'Stop', fontsize = 15,color = 'r')
plt.text(5,f(price_duration.Frequency.max()-H_G)-3,'Pump', fontsize = 15, color = 'b')
plt.savefig('figure_ts_'+str(time)+'.pdf', transparent=True)
plt.show()
print(result) # show EA solver message
print('')
print('*******Optimal Operation at '+ str(round(H_G,2)) + ' % of Total 15 minutes*******')
| mit |
loggly/skyline | src/analyzer/algorithms.py | 1 | 9913 |
import pandas
import numpy as np
import scipy
import statsmodels.api as sm
import traceback
import logging
from time import time
from msgpack import unpackb, packb
from redis import StrictRedis
from settings import (
ALGORITHMS,
CONSENSUS,
FULL_DURATION,
MAX_TOLERABLE_BOREDOM,
MIN_TOLERABLE_LENGTH,
STALE_PERIOD,
REDIS_SOCKET_PATH,
ENABLE_SECOND_ORDER,
BOREDOM_SET_SIZE,
)
from algorithm_exceptions import *
logger = logging.getLogger("AnalyzerLog")
redis_conn = StrictRedis(unix_socket_path=REDIS_SOCKET_PATH)
"""
This is no man's land. Do anything you want in here,
as long as you return a boolean that determines whether the input
timeseries is anomalous or not.
To add an algorithm, define it here, and add its name to settings.ALGORITHMS.
"""
def tail_avg(timeseries):
"""
This is a utility function used to calculate the average of the last three
datapoints in the series as a measure, instead of just the last datapoint.
It reduces noise, but it also reduces sensitivity and increases the delay
to detection.
"""
try:
t = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3
return t
except IndexError:
return timeseries[-1][1]
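# Illustrative doctest-style sketch (not part of the original module): tail_avg
# expects a list of (timestamp, value) pairs, as used throughout this file.
# >>> tail_avg(list(enumerate([1.0, 2.0, 3.0, 4.0])))
# 3.0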
def median_absolute_deviation(timeseries):
"""
A timeseries is anomalous if the deviation of its latest datapoint with
respect to the median is X times larger than the median of deviations.
"""
series = pandas.Series([x[1] for x in timeseries])
median = series.median()
demedianed = np.abs(series - median)
median_deviation = demedianed.median()
# The test statistic is infinite when the median is zero,
# so it becomes super sensitive. We play it safe and skip when this happens.
if median_deviation == 0:
return False
test_statistic = demedianed.iloc[-1] / median_deviation
    # Completely arbitrary...triggers if the median deviation is
    # 6 times bigger than the median
if test_statistic > 6:
return True
return False
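# Illustrative doctest-style sketch (not part of the original module): a large
# final jump relative to the typical spread triggers the detector, while a
# constant series hits the zero-median-deviation guard above and returns False.
# >>> median_absolute_deviation(list(enumerate([10.0, 10.1] * 10 + [20.0])))
# True
# >>> median_absolute_deviation(list(enumerate([5.0] * 30)))
# False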
#def grubbs(timeseries):
# """
# A timeseries is anomalous if the Z score is greater than the Grubb's score.
# """
# series = scipy.array([x[1] for x in timeseries])
# stdDev = scipy.std(series)
# mean = np.mean(series)
# tail_average = tail_avg(timeseries)
# z_score = (tail_average - mean) / stdDev
# len_series = len(series)
# threshold = scipy.stats.t.isf(.05 / (2 * len_series), len_series - 2)
# threshold_squared = threshold * threshold
# grubbs_score = ((len_series - 1) / np.sqrt(len_series)) * np.sqrt(threshold_squared / (len_series - 2 + threshold_squared))
# return z_score > grubbs_score
def first_hour_average(timeseries):
"""
    Calculate the simple average over one hour, FULL_DURATION seconds ago.
A timeseries is anomalous if the average of the last three datapoints
are outside of three standard deviations of this value.
"""
last_hour_threshold = time() - (FULL_DURATION - 3600)
series = pandas.Series([x[1] for x in timeseries if x[0] < last_hour_threshold])
mean = (series).mean()
stdDev = (series).std()
t = tail_avg(timeseries)
return abs(t - mean) > 3 * stdDev
def stddev_from_average(timeseries):
"""
    A timeseries is anomalous if the absolute value of the average of the latest
    three datapoints minus the series mean is greater than three standard
    deviations of the series. Unlike stddev_from_moving_average, this does not
    exponentially weight the average, so it is better for detecting anomalies
    with respect to the entire series.
"""
series = pandas.Series([x[1] for x in timeseries])
mean = series.mean()
stdDev = series.std()
t = tail_avg(timeseries)
return abs(t - mean) > 3 * stdDev
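# Illustrative doctest-style sketch (not part of the original module): a single
# large spike at the end of an otherwise flat series is flagged, while a flat
# series is not.
# >>> stddev_from_average(list(enumerate([1.0] * 999 + [50.0])))
# True
# >>> stddev_from_average(list(enumerate([1.0] * 1000)))
# False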
def stddev_from_moving_average(timeseries):
"""
A timeseries is anomalous if the absolute value of the average of the latest
    three datapoints minus the moving average is greater than three standard
deviations of the moving average. This is better for finding anomalies with
respect to the short term trends.
"""
series = pandas.Series([x[1] for x in timeseries])
expAverage = pandas.stats.moments.ewma(series, com=50)
stdDev = pandas.stats.moments.ewmstd(series, com=50)
return abs(series.iloc[-1] - expAverage.iloc[-1]) > 3 * stdDev.iloc[-1]
def mean_subtraction_cumulation(timeseries):
"""
A timeseries is anomalous if the value of the next datapoint in the
series is farther than three standard deviations out in cumulative terms
after subtracting the mean from each data point.
"""
series = pandas.Series([x[1] if x[1] else 0 for x in timeseries])
series = series - series[0:len(series) - 1].mean()
stdDev = series[0:len(series) - 1].std()
expAverage = pandas.stats.moments.ewma(series, com=15)
return abs(series.iloc[-1]) > 3 * stdDev
def least_squares(timeseries):
"""
A timeseries is anomalous if the average of the last three datapoints
on a projected least squares model is greater than three sigma.
"""
x = np.array([t[0] for t in timeseries])
y = np.array([t[1] for t in timeseries])
A = np.vstack([x, np.ones(len(x))]).T
results = np.linalg.lstsq(A, y)
residual = results[1]
m, c = np.linalg.lstsq(A, y)[0]
errors = []
for i, value in enumerate(y):
projected = m * x[i] + c
error = value - projected
errors.append(error)
if len(errors) < 3:
return False
std_dev = scipy.std(errors)
t = (errors[-1] + errors[-2] + errors[-3]) / 3
return abs(t) > std_dev * 3 and round(std_dev) != 0 and round(t) != 0
def histogram_bins(timeseries):
"""
A timeseries is anomalous if the average of the last three datapoints falls
    into a histogram bin with 20 or fewer datapoints (you'll need to tweak
    that number depending on your data).
    Returns: True when the tail_avg falls into such a sparsely populated bin,
    False otherwise.
"""
series = scipy.array([x[1] for x in timeseries])
t = tail_avg(timeseries)
h = np.histogram(series, bins=15)
bins = h[1]
for index, bin_size in enumerate(h[0]):
if bin_size <= 20:
# Is it in the first bin?
if index == 0:
if t <= bins[0]:
return True
# Is it in the current bin?
elif t >= bins[index] and t < bins[index + 1]:
return True
return False
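# Illustrative doctest-style sketch (not part of the original module): the tail
# average of the series below lands in an empty histogram bin between the bulk
# of the data and the outlier, so the series is flagged.
# >>> histogram_bins(list(enumerate([10.0] * 99 + [100.0])))
# True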
#def ks_test(timeseries):
# """
# A timeseries is anomalous if 2 sample Kolmogorov-Smirnov test indicates
# that data distribution for last 10 minutes is different from last hour.
# It produces false positives on non-stationary series so Augmented
# Dickey-Fuller test applied to check for stationarity.
# """
#
# hour_ago = time() - 3600
# ten_minutes_ago = time() - 600
# reference = scipy.array([x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])
# probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_minutes_ago])
# if reference.size < 20 or probe.size < 20:
# return False
# ks_d, ks_p_value = scipy.stats.ks_2samp(reference, probe)
# if ks_p_value < 0.05 and ks_d > 0.5:
# adf = sm.tsa.stattools.adfuller(reference, 10)
# if adf[1] < 0.05:
# return True
# return False
#def is_anomalously_anomalous(metric_name, ensemble, datapoint):
# """
# This method runs a meta-analysis on the metric to determine whether the
# metric has a past history of triggering. TODO: weight intervals based on datapoint
# """
# # We want the datapoint to avoid triggering twice on the same data
# new_trigger = [time(), datapoint]
# # Get the old history
# raw_trigger_history = redis_conn.get('trigger_history.' + metric_name)
# if not raw_trigger_history:
# redis_conn.set('trigger_history.' + metric_name, packb([(time(), datapoint)]))
# return True
# trigger_history = unpackb(raw_trigger_history)
# # Are we (probably) triggering on the same data?
# if (new_trigger[1] == trigger_history[-1][1] and
# new_trigger[0] - trigger_history[-1][0] <= 300):
# return False
# # Update the history
# trigger_history.append(new_trigger)
# redis_conn.set('trigger_history.' + metric_name, packb(trigger_history))
# # Should we surface the anomaly?
# trigger_times = [x[0] for x in trigger_history]
# intervals = [
# trigger_times[i + 1] - trigger_times[i]
# for i, v in enumerate(trigger_times)
# if (i + 1) < len(trigger_times)
# ]
# series = pandas.Series(intervals)
# mean = series.mean()
# stdDev = series.std()
# return abs(intervals[-1] - mean) > 3 * stdDev
def run_selected_algorithm(timeseries, metric_name):
"""
Filter timeseries and run selected algorithm.
"""
# Get rid of short series
if len(timeseries) < MIN_TOLERABLE_LENGTH:
raise TooShort()
# Get rid of stale series
if time() - timeseries[-1][0] > STALE_PERIOD:
raise Stale()
# Get rid of boring series
if len(set(item[1] for item in timeseries[-MAX_TOLERABLE_BOREDOM:])) == BOREDOM_SET_SIZE:
raise Boring()
try:
ensemble = [globals()[algorithm](timeseries) for algorithm in ALGORITHMS]
threshold = len(ensemble) - CONSENSUS
if ensemble.count(False) <= threshold:
# if ENABLE_SECOND_ORDER:
# if is_anomalously_anomalous(metric_name, ensemble, timeseries[-1][1]):
# return True, ensemble, timeseries[-1][1], threshold
# else:
return True, ensemble, timeseries[-1][1], threshold
return False, ensemble, timeseries[-1][1], threshold
except:
logging.error("Algorithm error: " + traceback.format_exc())
return False, [], 1
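# Illustrative note (not part of the original module): with default-style settings
# of nine algorithms and CONSENSUS = 6, a datapoint is flagged when at most
# len(ensemble) - CONSENSUS algorithms disagree, e.g.:
# >>> ensemble = [True, True, True, False, True, True, False, True, True]
# >>> ensemble.count(False) <= len(ensemble) - 6
# True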
| mit |
kgullikson88/TS23-Scripts | TrimData.py | 1 | 5641 | import sys
import numpy as np
import matplotlib.pyplot as plt
import FitsUtils
trimming = {
21: [0, 576.3],
22: [582.6, 9e9],
23: [583.45, 9e9],
37: [0, 680.15]}
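# Assumed interpretation (comment added for clarity; not documented in the original
# file): keys are echelle order indices and the two-element lists give the
# [lower, upper] wavelength bounds, in the same units as order.x, that main1()
# below keeps for those orders.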
class Trimmer:
def __init__(self, data=None):
        if data is not None:
self.data = data.copy()
self.clicks = []
logfile = open("trimlog.dat", "w")
logfile.close()
def InputData(self, data):
self.data = data.copy()
def Plot(self):
self.fig = plt.figure(1, figsize=(11, 10))
cid = self.fig.canvas.mpl_connect('key_press_event', self.keypress)
plt.plot(self.data.x, self.data.y)
plt.plot(self.data.x, self.data.cont)
plt.show()
return self.data.copy()
def keypress(self, event):
if event.key == "r":
print "Set to remove points. Click on the bounds"
self.clipmode = "remove"
self.clickid = self.fig.canvas.mpl_connect('button_press_event', self.mouseclick)
elif event.key == "i":
print "Set to interpolate between points. Click the bounds"
self.clipmode = "interpolate"
self.clickid = self.fig.canvas.mpl_connect('button_press_event', self.mouseclick)
def mouseclick(self, event):
self.clicks.append(event.xdata)
if len(self.clicks) == 2:
left = max(0, np.searchsorted(self.data.x, min(self.clicks)))
right = min(self.data.size() - 1, np.searchsorted(self.data.x, max(self.clicks)))
logfile = open("trimlog.dat", "a")
if self.clipmode == "remove":
logfile.write("Removing:\t%.3f to %.3f\n" % (min(self.clicks), max(self.clicks)))
self.data.x = np.delete(self.data.x, np.arange(left, right + 1))
self.data.y = np.delete(self.data.y, np.arange(left, right + 1))
self.data.cont = np.delete(self.data.cont, np.arange(left, right + 1))
self.data.err = np.delete(self.data.err, np.arange(left, right + 1))
elif self.clipmode == "interpolate":
logfile.write("Interpolating:\t%.3f to %.3f\n" % (min(self.clicks), max(self.clicks)))
x1, x2 = self.data.x[left], self.data.x[right]
y1, y2 = self.data.y[left], self.data.y[right]
m = (y2 - y1) / (x2 - x1)
self.data.y[left:right] = m * (self.data.x[left:right] - x1) + y1
self.data.cont[left:right] = m * (self.data.x[left:right] - x1) + y1
self.fig.clf()
cid = self.fig.canvas.mpl_connect('key_press_event', self.keypress)
plt.plot(self.data.x, self.data.y)
plt.plot(self.data.x, self.data.cont)
plt.draw()
self.fig.canvas.mpl_disconnect(self.clickid)
self.clicks = []
logfile.close()
def main1():
for fname in sys.argv[1:]:
if "-" in fname:
num = int(fname.split("-")[-1].split(".fits")[0])
outfilename = "%s-%i.fits" % (fname.split("-")[0], num + 1)
else:
outfilename = "%s-0.fits" % (fname.split(".fits")[0])
orders = FitsUtils.MakeXYpoints(fname, extensions=True, x="wavelength", y="flux", errors="error",
cont="continuum")
for i, order in enumerate(orders):
if i in trimming.keys():
left = np.searchsorted(order.x, trimming[i][0])
right = np.searchsorted(order.x, trimming[i][1])
order.x = order.x[left:right]
order.y = order.y[left:right]
order.cont = order.cont[left:right]
order.err = order.err[left:right]
orders[i] = order.copy()
columns = {"wavelength": order.x,
"flux": order.y,
"continuum": order.cont,
"error": order.err}
if i == 0:
FitsUtils.OutputFitsFileExtensions(columns, fname, outfilename, mode="new")
else:
FitsUtils.OutputFitsFileExtensions(columns, outfilename, outfilename, mode="append")
if __name__ == "__main__":
trim = Trimmer()
for fname in sys.argv[1:]:
if "-" in fname:
num = int(fname.split("-")[-1].split(".fits")[0])
outfilename = "%s-%i.fits" % (fname.split("-")[0], num + 1)
else:
outfilename = "%s-0.fits" % (fname.split(".fits")[0])
orders = FitsUtils.MakeXYpoints(fname, extensions=True, x="wavelength", y="flux", errors="error",
cont="continuum")
# trim = Trimmer(orders[0])
logfile = open("trimlog.dat", "a")
logfile.write("\n\n\n******************************************\n")
logfile.write("\nTrimming file %s\n\n" % (fname))
logfile.write("******************************************\n")
logfile.close()
for i, order in enumerate(orders):
logfile = open("trimlog.dat", "a")
logfile.write("******** Order %i ******************\n" % (i + 1))
logfile.close()
trim.InputData(order)
order = trim.Plot()
columns = {"wavelength": order.x,
"flux": order.y,
"continuum": order.cont,
"error": order.err}
if i == 0:
FitsUtils.OutputFitsFileExtensions(columns, fname, outfilename, mode="new")
else:
FitsUtils.OutputFitsFileExtensions(columns, outfilename, outfilename, mode="append")
| gpl-3.0 |
DucQuang1/BuildingMachineLearningSystemsWithPython | ch04/blei_lda.py | 21 | 2601 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
from wordcloud import create_cloud
try:
from gensim import corpora, models, matutils
except:
print("import gensim failed.")
print()
print("Please install it")
raise
import matplotlib.pyplot as plt
import numpy as np
from os import path
NUM_TOPICS = 100
# Check that data exists
if not path.exists('./data/ap/ap.dat'):
print('Error: Expected data to be present at data/ap/')
print('Please cd into ./data & run ./download_ap.sh')
# Load the data
corpus = corpora.BleiCorpus('./data/ap/ap.dat', './data/ap/vocab.txt')
# Build the topic model
model = models.ldamodel.LdaModel(
corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=None)
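# Illustrative sketch (not part of the original script): the fitted model can be
# queried per document; iterating the corpus yields bag-of-words vectors and
# model[bow] returns the (topic_id, weight) pairs that the per-document topic
# counts further below rely on.
# >>> first_bow = next(iter(corpus))
# >>> model[first_bow]  # e.g. [(3, 0.42), (17, 0.31), ...] -- weights vary by run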
# Iterate over all the topics in the model
for ti in range(model.num_topics):
words = model.show_topic(ti, 64)
tf = sum(f for f, w in words)
with open('topics.txt', 'w') as output:
output.write('\n'.join('{}:{}'.format(w, int(1000. * f / tf)) for f, w in words))
output.write("\n\n\n")
# We first identify the most discussed topic, i.e., the one with the
# highest total weight
topics = matutils.corpus2dense(model[corpus], num_terms=model.num_topics)
weight = topics.sum(1)
max_topic = weight.argmax()
# Get the top 64 words for this topic
# Without the argument, show_topic would return only 10 words
words = model.show_topic(max_topic, 64)
# This function will actually check for the presence of pytagcloud and is otherwise a no-op
create_cloud('cloud_blei_lda.png', words)
num_topics_used = [len(model[doc]) for doc in corpus]
fig,ax = plt.subplots()
ax.hist(num_topics_used, np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
fig.tight_layout()
fig.savefig('Figure_04_01.png')
# Now, repeat the same exercise using alpha=1.0
# You can edit the constant below to play around with this parameter
ALPHA = 1.0
model1 = models.ldamodel.LdaModel(
corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=ALPHA)
num_topics_used1 = [len(model1[doc]) for doc in corpus]
fig,ax = plt.subplots()
ax.hist([num_topics_used, num_topics_used1], np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
# The coordinates below were fit by trial and error to look good
ax.text(9, 223, r'default alpha')
ax.text(26, 156, 'alpha=1.0')
fig.tight_layout()
fig.savefig('Figure_04_02.png')
| mit |
AustereCuriosity/numpy | numpy/linalg/linalg.py | 2 | 75877 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains a high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar, object_
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decompostion
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
    The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
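# A small illustration (not part of NumPy's public API) of the rank-deficiency
# discussion in the matrix_rank notes above: a column that is an exact linear
# combination of the other columns typically yields a tiny but nonzero smallest
# singular value, which the default tolerance still treats as zero.
def _matrix_rank_tolerance_sketch():
    a = asarray([[1., 2., 3.],
                 [4., 5., 9.],
                 [7., 8., 15.]])  # third column equals the sum of the first two
    S = svd(a, compute_uv=False)
    tol = S.max() * max(a.shape) * finfo(S.dtype).eps
    # matrix_rank(a) reports 2 because the smallest value in S falls below `tol`.
    return matrix_rank(a), S, tol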
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
    of A's so-called singular values (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
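# A minimal sketch (not part of NumPy's public API) of the SVD construction
# described in the pinv notes above: keep only the "large" singular values,
# take their reciprocals, and recombine the transposed factors. It relies only
# on names already defined in this module.
def _pinv_from_svd_sketch(a, rcond=1e-15):
    a, wrap = _makearray(a)
    u, s, vt = svd(a.conjugate(), 0)
    cutoff = rcond * maximum.reduce(s)
    # Reciprocal of the large singular values, zero elsewhere.
    s_inv = array([1. / si if si > cutoff else 0. for si in s])
    return wrap(dot(transpose(vt), multiply(s_inv[:, newaxis], transpose(u))))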
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
        If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
    >>> A = np.random.random((10000, 100))
    >>> B = np.random.random((100, 1000))
    >>> C = np.random.random((1000, 5))
    >>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
    :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
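# A tiny worked example (not part of NumPy's public API) of the parenthesization
# costs quoted in the multi_dot docstring for A_{10x100}, B_{100x5}, C_{5x50},
# using the same cost(X, Y) = rows(X) * cols(X) * cols(Y) rule.
def _multi_dot_cost_sketch():
    def cost(shape_a, shape_b):
        return shape_a[0] * shape_a[1] * shape_b[1]
    A, B, C = (10, 100), (100, 5), (5, 50)
    cost_ab_first = cost(A, B) + cost((A[0], B[1]), C)  # 5000 + 2500 = 7500
    cost_bc_first = cost(B, C) + cost(A, (B[0], C[1]))  # 25000 + 50000 = 75000
    # multi_dot picks the cheaper (AB)C ordering for these shapes.
    return cost_ab_first, cost_bc_first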
| bsd-3-clause |
nonabelian/tda_dionysus | scripts/pca_demo.py | 1 | 1605 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import topology.data as td
def run_coffee_mug_pca_example():
X, y = td.coffee_mug(bottom_label=0, side_label=0, handle_label=1)
c = ['r' if l else 'b' for l in y]
# Nontrivial rotation around the x-axis
angle = np.pi / 4.0
rotation_matrix = np.array([[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
X = rotation_matrix.dot(X.T).T
# Perform PCA 3D down to 2D
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X_pca[:,0], X_pca[:,1], c=c)
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(-1.5, 1.5)
plt.savefig('images/coffee_mug_pca.png')
plt.show()
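# A small sanity-check sketch (not called by the examples in this script):
# a rotation matrix built as above is orthogonal, so R.dot(R.T) is the
# identity and the rotated point cloud keeps its shape before PCA is applied.
def check_rotation_matrix_is_orthogonal(angle=np.pi / 4.0):
    rotation_matrix = np.array([[1, 0, 0],
                                [0, np.cos(angle), -np.sin(angle)],
                                [0, np.sin(angle), np.cos(angle)]])
    return np.allclose(rotation_matrix.dot(rotation_matrix.T), np.eye(3))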
def run_pail_pca_example():
X, y = td.pail(bottom_label=0, side_label=0, handle_label=1)
c = ['r' if l else 'b' for l in y]
# Nontrivial rotation around the x-axis
angle = np.pi / 4.0
rotation_matrix = np.array([[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
X = rotation_matrix.dot(X.T).T
# Perform PCA 3D down to 2D:
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X_pca[:,0], X_pca[:,1], c=c)
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(-1.5, 1.5)
plt.savefig('images/pail_pca.png')
plt.show()
if __name__ == '__main__':
run_coffee_mug_pca_example()
run_pail_pca_example()
| gpl-3.0 |
GitYiheng/reinforcement_learning_test | test03_monte_carlo/t35_rlvps01.py | 1 | 7662 | import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
import gym_morph # customized environment for cart-pole
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
start_time = time.time()
MAX_TEST = 10
for test_num in range(3,MAX_TEST+1):
# Hyperparameters
RANDOM_NUMBER_SEED = test_num
ENVIRONMENT1 = "morph-v0"
MAX_EPISODES = 20000 # number of episodes
EPISODE_LENGTH = 2000 # single episode length
HIDDEN_SIZE = 16
DISPLAY_WEIGHTS = False # Help debug weight update
gamma = 0.99 # Discount per step
RENDER = False # Render the cart-pole system
VIDEO_INTERVAL = 100 # Generate a video at this interval
CONSECUTIVE_TARGET = 100 # Including previous 100 rewards
CONST_LR = True # Constant or decaying learing rate
# Constant learning rate
const_learning_rate_in = 0.001
# Decay learning rate
start_learning_rate_in = 0.003
decay_steps_in = 100
decay_rate_in = 0.95
DIR_PATH_SAVEFIG = "/root/cartpole_plot/"
if CONST_LR:
learning_rate = const_learning_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_clr" + str(learning_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
else:
start_learning_rate = start_learning_rate_in
decay_steps = decay_steps_in
decay_rate = decay_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_dlr_slr" + str(start_learning_rate).replace(".", "p") \
+ "_ds" + str(decay_steps) \
+ "_dr" + str(decay_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
env = gym.make(ENVIRONMENT1)
env.seed(RANDOM_NUMBER_SEED)
np.random.seed(RANDOM_NUMBER_SEED)
tf.set_random_seed(RANDOM_NUMBER_SEED)
# Input and output sizes
input_size = 4
output_size = 2
# input_size = env.observation_space.shape[0]
# try:
# output_size = env.action_space.shape[0]
# except AttributeError:
# output_size = env.action_space.n
# Tensorflow network setup
x = tf.placeholder(tf.float32, shape=(None, input_size))
y = tf.placeholder(tf.float32, shape=(None, 1))
if not CONST_LR:
# decay learning rate
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, decay_steps, decay_rate, staircase=False)
expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
# Xavier (2010) weights initializer for uniform distribution:
# x = sqrt(6. / (in + out)); [-x, x]
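    # As a worked example of the formula above (not used by the code): for the
    # first layer, in = input_size = 4 and out = HIDDEN_SIZE = 16, so
    # x = sqrt(6. / (4 + 16)) = sqrt(0.3) ~= 0.55, i.e. weights start in [-0.55, 0.55].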
w_init = tf.contrib.layers.xavier_initializer()
hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
initializer=w_init)
hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
dist_soft = tf.nn.log_softmax(dist)
dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
pi = tf.contrib.distributions.Bernoulli(dist_in)
pi_sample = pi.sample()
log_pi = pi.log_prob(y)
if CONST_LR:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi)
else:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi, global_step=global_step)
# saver = tf.train.Saver()
# Create and initialize a session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def run_episode(environment, ep, render=False):
raw_reward = 0
discounted_reward = 0
cumulative_reward = []
discount = 1.0
states = []
actions = []
obs = environment.reset()
done = False
while not done:
states.append(obs)
cumulative_reward.append(discounted_reward)
if render and ((ep % VIDEO_INTERVAL) == 0):
environment.render()
action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
actions.append(action)
obs, reward, done, info = env.step(action[0])
raw_reward += reward
if reward > 0:
discounted_reward += reward * discount
else:
discounted_reward += reward
discount *= gamma
return raw_reward, discounted_reward, cumulative_reward, states, actions
def display_weights(session):
w1 = session.run(hidden_W)
b1 = session.run(hidden_B)
w2 = session.run(dist_W)
b2 = session.run(dist_B)
print(w1, b1, w2, b2)
returns = []
mean_returns = []
for ep in range(MAX_EPISODES):
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, ep, RENDER)
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
returns.append(raw_G)
running_returns = returns[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
mean_return = np.mean(running_returns)
mean_returns.append(mean_return)
if CONST_LR:
msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), learning_rate, raw_G, CONSECUTIVE_TARGET, mean_return)
print(msg)
else:
msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), sess.run(learning_rate), raw_G, CONSECUTIVE_TARGET, mean_return)
print(msg)
env.close() # close openai gym environment
tf.reset_default_graph() # clear tensorflow graph
# Plot
# plt.style.use('ggplot')
plt.style.use('dark_background')
episodes_plot = np.arange(MAX_EPISODES)
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.85)
if CONST_LR:
ax.set_title("The Cart-Pole Problem Test %i \n \
Episode Length: %i \
Discount Factor: %.2f \n \
Number of Hidden Neuron: %i \
Constant Learning Rate: %.5f" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, learning_rate))
else:
ax.set_title("The Cart-Pole Problem Test %i \n \
EpisodeLength: %i DiscountFactor: %.2f NumHiddenNeuron: %i \n \
Decay Learning Rate: (start: %.5f, steps: %i, rate: %.2f)" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, start_learning_rate, decay_steps, decay_rate))
ax.set_xlabel("Episode")
ax.set_ylabel("Return")
ax.set_ylim((0, EPISODE_LENGTH))
ax.grid(linestyle='--')
ax.plot(episodes_plot, returns, label='Instant return')
ax.plot(episodes_plot, mean_returns, label='Averaged return')
legend = ax.legend(loc='best', shadow=True)
fig.savefig(DIR_PATH_SAVEFIG + file_name_savefig, dpi=500)
# plt.show()
| mit |
schets/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
AssistiveRoboticsUNH/threespace_ros | scripts/canal_surface_test/gmm.py | 1 | 15791 | #!/usr/env/python
from __future__ import print_function
import platform
print(platform.platform())
import sys
print("Python", sys.version)
import numpy as np;
print("NumPy", np.__version__)
import scipy
print("SciPy", scipy.__version__)
import sklearn
print("Scikit-Learn", sklearn.__version__)
import rospy
import rosbag
import sys
import os
import itertools
import scipy.io as sio
from pomegranate import *
from os import listdir
from dtw import dtw
from sklearn.metrics.pairwise import euclidean_distances
from os.path import isfile, join
from pypr.stattest import *
def align_signal(s_, t, w=5, has_time=True, get_distance=False):
"""
    Align signal `t` onto the time base of `s_` using dynamic time warping.
    Every row holds the signal's dimensions at one time point, so every
    column is one dimension's time series. The window size `w` is symmetric
    (w=5 means a window of 11 samples) but is currently unused here.
"""
# t = t.transpose(1, 0)
# s = s.transpose(1, 0)
if has_time:
s_ = s_[:, 1:]
t = t[:, 1:]
dist_fun = euclidean_distances
dist_, cost_, acc_, path_ = dtw(s_, t, dist_fun)
path_ = np.array(path_)
warped_t = t[path_[1, :], :]
new_t = np.zeros(s_.shape)
for i in range(warped_t.shape[0]):
new_t[path_[0, i], :] = warped_t[i, :]
if has_time:
Ts = np.arange(1, s_.shape[0] + 1)
Ts = Ts.reshape(-1, 1)
new_t = np.hstack((Ts, new_t))
if get_distance:
return new_t, dist_
return new_t
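# A minimal usage sketch of align_signal (defined above); it is not called
# anywhere in this script. Both arrays follow the documented convention:
# the first column is a time stamp, the remaining columns are signal dimensions.
def _align_signal_demo():
    s = np.array([[1, 0.0], [2, 0.5], [3, 1.0], [4, 1.5]])
    t = np.array([[1, 0.0], [2, 1.5]])
    aligned, dist = align_signal(s, t, has_time=True, get_distance=True)
    # `aligned` has the same number of rows as `s`; `dist` is the DTW cost.
    return aligned, dist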
warnings.simplefilter("ignore", category=RuntimeWarning)
warnings.simplefilter("ignore", category=DeprecationWarning)
exNames = {"circle", "square", "triangle", "complex", "swiperight", "swipeleft", "rotateright", "rotateleft", "scupcw",
"scupccw"}
idx = 0
LABELS = {}
NAMES = {}
NAMESCAP = {}
exercises_by_type = {}
exercises_extended_by_type = {}
exercises_compressed_by_type = {}
slow_by_type = {}
slow_compressed_to_normal_by_type = {}
slow_compressed_to_fast_by_type = {}
fast_by_type = {}
fast_extended_to_normal_by_type = {}
fast_extended_to_slow_by_type = {}
labels_by_type = {}
lengths_by_type = {}
train_by_type = {}
test_by_type = {}
train_extended_by_type = {}
test_extended_by_type = {}
train_compressed_by_type = {}
test_compressed_by_type = {}
SUBJECTS = ['s1']
for name in exNames:
LABELS[name] = idx
NAMES[idx] = name
exercises_by_type[name] = []
exercises_extended_by_type[name] = []
exercises_compressed_by_type[name] = []
NAMESCAP[name] = name.title()
train_by_type[name] = []
slow_by_type[name] = []
slow_compressed_to_normal_by_type[name] = []
slow_compressed_to_fast_by_type[name] = []
fast_by_type[name] = []
fast_extended_to_normal_by_type[name] = []
fast_extended_to_slow_by_type[name] = []
test_by_type[name] = []
train_extended_by_type[name] = []
test_extended_by_type[name] = []
train_compressed_by_type[name] = []
test_compressed_by_type[name] = []
labels_by_type[name] = []
idx += 1
# LOAD FILES
matfiles = [f for f in listdir('matfiles') if (isfile(join('matfiles', f)))]
exercise_data = []
labels = []
addAccels = True
# DTW REGULAR DEMONSTRATIONS
print('Aligning regular demonstrations')
for l in LABELS:
print(l)
tf_data = []
for f in matfiles:
if ('slow' not in f) and ('full' not in f) and ('fast' not in f) and (l in f):
print(f)
data = sio.loadmat('matfiles/' + f)
data = data.get('data')
for i in range(len(data)):
exercise_data.append(data[i][:-1])
labels.append(data[i][-1])
tf_data.append(data[i][0:22])
labels_by_type.get(l).append(data[:][:-1])
if addAccels:
t = data[:, 0:22]
t = np.hstack((t, data[:, 26:29]))
t = np.hstack((t, data[:, 33:36]))
t = np.hstack((t, data[:, 40:43]))
exercises_by_type.get(l).append(t)
else:
exercises_by_type.get(l).append(data[:, 0:22])
else:
continue
maxlen = -1
index = 0
x = exercises_by_type.get(l)
for ex in range(0, len(x)):
if len(x[ex]) > maxlen:
maxlen = len(x[ex])
index = ex
lengths_by_type[l] = maxlen
for ex in range(0, len(x)):
# print('----------')
# print(x[ex].shape)
x[ex], dis = align_signal(x[index],
x[ex],
has_time=True,
get_distance=True)
print(x[ex].shape)
print("Adding slow and fast demonstrations")
# for l in LABELS:
tf_data = []
for f in matfiles:
if (('slow' in f) or ('fast' in f)) and ('full' not in f) and (l in f):
print(f)
data = sio.loadmat('matfiles/' + f)
data = data.get('data')
for i in range(len(data)):
tf_data.append(data[i][0:22])
if addAccels:
t = data[:, 0:22]
t = np.hstack((t, data[:, 26:29]))
t = np.hstack((t, data[:, 33:36]))
t = np.hstack((t, data[:, 40:43]))
if 'slow' in f:
slow_by_type.get(l).append(t)
else:
fast_by_type.get(l).append(t)
else:
if 'slow' in f:
slow_by_type.get(l).append(data[:, 0:22])
else:
fast_by_type.get(l).append(data[:, 0:22])
else:
continue
# COMPRESS SLOW DEMONSTRATIONS
# print('Compressing slow and normal demonstrations')
# for l in LABELS:
# print(NAMES.get(LABELS.get(l)))
# print('-----compress slow to normal-------')
# print(slow_by_type.get(l)[0].shape)
slow_compressed_to_normal_by_type.get(l).append(align_signal(exercises_by_type.get(l)[0],
slow_by_type.get(l)[0],
has_time=True,
get_distance=False)
)
print(slow_compressed_to_normal_by_type.get(l)[0].shape)
# print('-----compress slow to fast-------')
# print(slow_by_type.get(l)[0].shape)
slow_compressed_to_fast_by_type.get(l).append(align_signal(fast_by_type.get(l)[0],
slow_by_type.get(l)[0],
has_time=True,
get_distance=False)
)
print(slow_compressed_to_fast_by_type.get(l)[0].shape)
# print('----- compress normal to fast ----')
x = exercises_by_type.get(l)
# print(x[0].shape)
for ex in range(len(x)):
exercises_compressed_by_type.get(l).append(align_signal(fast_by_type.get(l)[0],
exercises_by_type.get(l)[ex],
has_time=True,
get_distance=False)
)
# print('--------------------------')
# print(exercises_compressed_by_type.get(l)[ex].shape)
# EXTEND NORMAL DEMONSTRATIONS
# print("Extending normal and fast demonstration")
# for l in LABELS:
# print(str(len(exercises_by_type.get(l)))+' ***********')
x = exercises_by_type.get(l)
# print('--------- extend normal to slow ---------')
for ex in range(len(x)):
exercises_extended_by_type.get(l).append(align_signal(slow_by_type.get(l)[0],
exercises_by_type.get(l)[ex],
has_time=True,
get_distance=False))
print(exercises_extended_by_type.get(l)[ex].shape)
# print('--------------------')
# print('--------- extend fast to normal ---------')
fast_extended_to_normal_by_type.get(l).append(align_signal(exercises_by_type.get(l)[0],
fast_by_type.get(l)[0],
has_time=True,
get_distance=False)
)
# print(fast_extended_to_normal_by_type.get(l)[0].shape)
# print('--------- extend fast to slow ---------')
fast_extended_to_slow_by_type.get(l).append(align_signal(slow_by_type.get(l)[0],
fast_by_type.get(l)[0],
has_time=True,
get_distance=False)
)
print(fast_extended_to_slow_by_type.get(l)[0].shape)
# print("Adding stamps")
# ADD SEQUENCE NUMBERS
# for l in LABELS:
x = exercises_by_type.get(l)
maxlen = len(x[0])
stamps = [[i] for i in range(maxlen)]
for ex in range(0, len(x)):
x[ex] = np.hstack((stamps, x[ex]))
lengths_by_type[l] = maxlen
x = slow_compressed_to_normal_by_type.get(l)
for ex in range(0, len(x)):
x[ex] = np.hstack((stamps, x[ex]))
x = fast_extended_to_normal_by_type.get(l)
for ex in range(0, len(x)):
x[ex] = np.hstack((stamps, x[ex]))
x = slow_by_type.get(l)
maxlen = len(x[0])
stamps = [[i] for i in range(maxlen)]
for ex in range(len(x)):
x[ex] = np.hstack((stamps, x[ex]))
x = exercises_extended_by_type.get(l)
for ex in range(len(x)):
x[ex] = np.hstack((stamps, x[ex]))
x = fast_extended_to_slow_by_type.get(l)
for ex in range(len(x)):
x[ex] = np.hstack((stamps, x[ex]))
x = fast_by_type.get(l)
maxlen = len(x[0])
stamps = [[i] for i in range(maxlen)]
for ex in range(len(x)):
x[ex] = np.hstack((stamps, x[ex]))
x = slow_compressed_to_fast_by_type.get(l)
for ex in range(len(x)):
x[ex] = np.hstack((stamps, x[ex]))
x = exercises_compressed_by_type.get(l)
for ex in range(len(x)):
x[ex] = np.hstack((stamps, x[ex]))
print("--------------------------------------")
labels = np.asarray(labels, dtype=np.int32)
exercise_data = np.asarray(exercise_data, dtype=np.float64)
tf_upper_data = exercise_data[:, 0:9]
tf_lower_data = exercise_data[:, 9:16]
tf_hand_data = exercise_data[:, 16:23]
imu_upper_data = exercise_data[:, 23:36]
imu_upper_data = imu_upper_data[:, 4:-3]
imu_lower_data = exercise_data[:, 36:49]
imu_lower_data = imu_lower_data[:, 4:-3]
imu_hand_data = exercise_data[:, 49:62]
imu_hand_data = imu_hand_data[:, 4:-3]
# print(exercise_data.shape)
# print('-------------')
# print('tf upper: {0}'.format(tf_upper_data.shape))
# print('tf lower: {0}'.format(tf_lower_data.shape))
# print('tf hand: {0}'.format(tf_hand_data.shape))
# print('-------------')
# print('imu upper: {0}'.format(imu_upper_data.shape))
# print('imu lower: {0}'.format(imu_lower_data.shape))
# print('imu hand: {0}'.format(imu_hand_data.shape))
# print('-------------')
full_data_tf = np.hstack((tf_upper_data, tf_lower_data))
full_data_tf = np.hstack((full_data_tf, tf_hand_data))
# print('tf full: {0}'.format(full_data_tf.shape))
# print('-------------')
full_data_imu = np.hstack((imu_upper_data, imu_lower_data))
full_data_imu = np.hstack((full_data_imu, imu_hand_data))
# print('imu full: {0}'.format(full_data_imu.shape))
# print('-------------')
full_data = np.hstack((full_data_imu, full_data_tf))
# print('full data: {0}'.format(full_data.shape))
# print('-------------')
training_data = []
training_labels = []
testing_data = []
testing_labels = []
print("Saving Test and training data")
for name in exNames:
tfExercise = exercises_by_type.get(name)
print(tfExercise[0].shape)
tfExerciseExtended = exercises_extended_by_type.get(name)
tfExerciseCompressed = exercises_compressed_by_type.get(name)
fastExtendedToNormalExercise = fast_extended_to_normal_by_type.get(name)
fastExtendedToSlowExercise = fast_extended_to_slow_by_type.get(name)
slowCompressedtoFastExercise = slow_compressed_to_normal_by_type.get(name)
slowCompressedtoNormalExercise = slow_compressed_to_fast_by_type.get(name)
slowTfExercise = slow_by_type.get(name)[0]
fastTfExercise = fast_by_type.get(name)[0]
sio.savemat('matfiles/FastExtendedToNormal' + NAMESCAP.get(name) + 'Data.mat',
mdict={'data': fastExtendedToNormalExercise})
sio.savemat('matfiles/FastExtendedToSlow' + NAMESCAP.get(name) + 'Data.mat',
mdict={'data': fastExtendedToSlowExercise})
sio.savemat('matfiles/SlowCompressedToNormal' + NAMESCAP.get(name) + 'Data.mat',
mdict={'data': slowCompressedtoNormalExercise})
sio.savemat('matfiles/SlowCompressedToFast' + NAMESCAP.get(name) + 'Data.mat',
mdict={'data': slowCompressedtoFastExercise})
sio.savemat('matfiles/Slow' + NAMESCAP.get(name) + 'Data.mat', mdict={'data': slowTfExercise})
sio.savemat('matfiles/Fast' + NAMESCAP.get(name) + 'Data.mat', mdict={'data': fastTfExercise})
tfExerciseTrain = tfExercise[0:int((len(tfExercise)) * 0.6)]
tfExerciseTest = tfExercise[int((len(tfExercise)) * 0.6):len(tfExercise)]
tfExExtendedTrain = tfExerciseExtended[0:(len(tfExerciseExtended)) / 10 * 6]
tfExExtendedTest = tfExerciseExtended[(len(tfExerciseExtended)) / 10 * 6 + 1:len(tfExerciseExtended)]
tfExCompressedTrain = tfExerciseCompressed[0:(len(tfExerciseCompressed)) / 10 * 6]
tfExCompressedTest = tfExerciseCompressed[(len(tfExerciseCompressed)) / 10 * 6 + 1:len(tfExerciseCompressed)]
print("Training data size: " + str(len(tfExerciseTrain)))
print("Tesing data size: " + str(len(tfExerciseTest)))
for i in tfExerciseTrain:
for x in i:
training_data.append(x)
train_by_type.get(name).append(x)
training_labels.append(LABELS.get(name))
sio.savemat('matfiles/' + NAMESCAP.get(name) + 'Data.mat', mdict={'data': train_by_type.get(name)})
for i in tfExExtendedTrain:
for x in i:
train_extended_by_type.get(name).append(x)
sio.savemat('matfiles/' + NAMESCAP.get(name) + 'ExtendedData.mat', mdict={'data': train_extended_by_type.get(name)})
for i in tfExCompressedTrain:
for x in i:
train_compressed_by_type.get(name).append(x)
sio.savemat('matfiles/' + NAMESCAP.get(name) + 'CompressedData.mat',
mdict={'data': train_compressed_by_type.get(name)})
for i in tfExerciseTest:
for x in i:
testing_data.append(x)
test_by_type.get(name).append(x)
testing_labels.append(LABELS.get(name))
# print(l + ' ' + str(LABELS.get(name)))
sio.savemat('matfiles/' + NAMESCAP.get(name) + 'DataTest.mat', mdict={'data': test_by_type.get(name)})
for i in tfExExtendedTest:
for x in i:
# print(x)
test_extended_by_type.get(name).append(x)
sio.savemat('matfiles/' + NAMESCAP.get(name) + 'ExtendedDataTest.mat',
mdict={'data': test_extended_by_type.get(name)})
for i in tfExCompressedTest:
for x in i:
test_compressed_by_type.get(name).append(x)
sio.savemat('matfiles/' + NAMESCAP.get(name) + 'CompressedDataTest.mat',
mdict={'data': test_compressed_by_type.get(name)})
| gpl-3.0 |
sgenoud/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 3 | 5350 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high dimension, as will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm is also applied, for comparison.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrate on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print __doc__
# Author: Virgile Fritsch <[email protected]>
# License: BSD
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.iteritems()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
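# Optional sanity check (added for illustration; not part of the original
# example): count how many observations each method flags as outliers on the
# first data set. Negative decision_function values fall outside the learned
# frontier.
for clf_name, clf in classifiers.iteritems():
    clf.fit(X1)
    n_flagged = (clf.decision_function(X1).ravel() < 0).sum()
    print "%s: %d candidate outliers out of %d points" % (
        clf_name, n_flagged, X1.shape[0])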
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1.values()[0].collections[0],
legend1.values()[1].collections[0],
legend1.values()[2].collections[0]),
(legend1.keys()[0], legend1.keys()[1], legend1.keys()[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teatcher ratio by town")
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2.values()[0].collections[0],
legend2.values()[1].collections[0],
legend2.values()[2].collections[0]),
(legend2.keys()[0], legend2.keys()[1], legend2.keys()[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
victorbergelin/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
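# Added illustration (not in the original example): the last fitted tree is a
# set of simple axis-aligned threshold rules; its depth and node count give a
# rough sense of how many such rules were learned for the final feature pair.
print("Tree for the last feature pair: depth=%d, %d nodes"
      % (clf.tree_.max_depth, clf.tree_.node_count))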
| bsd-3-clause |
wzbozon/statsmodels | statsmodels/sandbox/nonparametric/tests/ex_gam_am_new.py | 34 | 2606 | # -*- coding: utf-8 -*-
"""Example for gam.AdditiveModel and PolynomialSmoother
This example was written as a test case.
The data generating process is chosen so the parameters are well identified
and estimated.
Created on Fri Nov 04 13:45:43 2011
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, zip
import time
import numpy as np
#import matplotlib.pyplot as plt
from numpy.testing import assert_almost_equal
from scipy import stats
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.regression.linear_model import OLS, WLS
np.random.seed(8765993)
#seed is chosen for nice result, not randomly
#other seeds are pretty off in the prediction
#DGP: simple polynomial
order = 3
sigma_noise = 0.5
nobs = 1000 #1000 #with 1000, OLS and AdditiveModel agree in params at 2 decimals
lb, ub = -3.5, 4#2.5
x1 = np.linspace(lb, ub, nobs)
x2 = np.sin(2*x1)
x = np.column_stack((x1/x1.max()*2, x2))
exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)
idx = lrange((order+1)*2)
del idx[order+1]
exog_reduced = exog[:,idx] #remove duplicate constant
y_true = exog.sum(1) / 2.
z = y_true #alias check
d = x
y = y_true + sigma_noise * np.random.randn(nobs)
example = 1
if example == 1:
m = AdditiveModel(d)
m.fit(y)
y_pred = m.results.predict(d)
for ss in m.smoothers:
print(ss.params)
res_ols = OLS(y, exog_reduced).fit()
print(res_ols.params)
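    # Added check (illustrative only, not in the original script): for this
    # well-identified polynomial DGP the additive-model fit and the plain OLS
    # fit on the expanded basis should be close.
    print('max |AM - OLS| difference in fitted values:',
          np.max(np.abs(y_pred - res_ols.fittedvalues)))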
#assert_almost_equal(y_pred, res_ols.fittedvalues, 3)
if example > 0:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(exog)
y_pred = m.results.mu# + m.results.alpha #m.results.predict(d)
plt.figure()
plt.subplot(2,2,1)
plt.plot(y, '.', alpha=0.25)
plt.plot(y_true, 'k-', label='true')
    plt.plot(res_ols.fittedvalues, 'g-', label='OLS', lw=2, alpha=.7)
plt.plot(y_pred, 'r-', label='AM')
plt.legend(loc='upper left')
plt.title('gam.AdditiveModel')
counter = 2
for ii, xx in zip(['z', 'x1', 'x2'], [z, x[:,0], x[:,1]]):
sortidx = np.argsort(xx)
#plt.figure()
plt.subplot(2, 2, counter)
plt.plot(xx[sortidx], y[sortidx], '.', alpha=0.25)
plt.plot(xx[sortidx], y_true[sortidx], 'k.', label='true', lw=2)
plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='AM')
plt.legend(loc='upper left')
plt.title('gam.AdditiveModel ' + ii)
counter += 1
plt.show() | bsd-3-clause |
kevin-intel/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 8 | 2074 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import ConfusionMatrixDisplay
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
class_names = iris.target_names
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01).fit(X_train, y_train)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
titles_options = [("Confusion matrix, without normalization", None),
("Normalized confusion matrix", 'true')]
for title, normalize in titles_options:
disp = ConfusionMatrixDisplay.from_estimator(
classifier, X_test, y_test, display_labels=class_names,
cmap=plt.cm.Blues, normalize=normalize
)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
plt.show()
| bsd-3-clause |
manojgudi/sandhi | modules/gr36/gr-filter/examples/decimate.py | 13 | 5841 | #!/usr/bin/env python
#
# Copyright 2009,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 10000000 # number of samples to use
self._fs = 10000 # initial sampling rate
self._decim = 20 # Decimation rate
# Generate the prototype filter taps for the decimators with a 200 Hz bandwidth
self._taps = filter.firdes.low_pass_2(1, self._fs,
200, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._decim))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._decim
print "Taps per channel: ", tpc
# Build the input signal source
# We create a list of freqs, and a sine wave is generated and added to the source
# for each one of these frequencies.
self.signals = list()
self.add = gr.add_cc()
freqs = [10, 20, 2040]
for i in xrange(len(freqs)):
self.signals.append(gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freqs[i], 1))
self.connect(self.signals[i], (self.add,i))
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct a PFB decimator filter
self.pfb = filter.pfb.decimator_ccf(self._decim, self._taps, 0)
# Construct a standard FIR decimating filter
self.dec = filter.fir_filter_ccf(self._decim, self._taps)
self.snk_i = gr.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
        # Create the sink for the decimated signal
self.snk = gr.vector_sink_c()
self.connect(self.pfb, self.snk)
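        # Added for illustration (not part of the original flow graph): the
        # single-stage FIR decimator defined above is otherwise left
        # unconnected, so give it its own sink in case its output should be
        # compared against the polyphase filterbank version.
        self.snk_dec = gr.vector_sink_c()
        self.connect(self.head, self.dec, self.snk_dec)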
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(16,9))
fig2 = pylab.figure(2, figsize=(16,9))
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input to the decimator
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b")
p1_t = sp1_t.plot(t_in, x_in.imag, "r")
sp1_t.set_ylim([-tb._decim*1.1, tb._decim*1.1])
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot the output of the decimator
fs_o = tb._fs / tb._decim
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk.data()[Ns:Ns+Ne]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("PFB Decimated Signal", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o.real, "b-o")
p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
duttashi/Data-Analysis-Visualization | scripts/general/chiSquareTest.py | 1 | 3347 | __author__ = 'Ashoo'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats
import warnings
sns.set(color_codes=True)
# Reading the data where low_memory=False increases the program efficiency
data= pd.read_csv("gapminder.csv", low_memory=False)
# setting variables that you will be working with to numeric
data['breastcancerper100th']= data['breastcancerper100th'].convert_objects(convert_numeric=True)
data['femaleemployrate']= data['femaleemployrate'].convert_objects(convert_numeric=True)
data['alcconsumption']= data['alcconsumption'].convert_objects(convert_numeric=True)
#print "Showing missing data coulmn-wise"
#print data.isnull().sum()
# Create a copy of the original dataset as sub5 by using the copy() method
sub5=data.copy()
# Since the data consists entirely of continuous variables, use the mean() for missing-value imputation
sub5.fillna(sub5['breastcancerper100th'].mean(), inplace=True)
sub5.fillna(sub5['femaleemployrate'].mean(), inplace=True)
sub5.fillna(sub5['alcconsumption'].mean(), inplace=True)
# Showing the count of null values after imputation
#print sub5.isnull().sum()
# categorize the quantitative variables into bins using quantile-based splits (qcut)
sub5['alco']=pd.qcut(sub5.alcconsumption,6,labels=["0","1-4","5-9","10-14","15-19","20-24"])
sub5['brst']=pd.qcut(sub5.breastcancerper100th,5,labels=["1-20","21-40","41-60","61-80","81-90"])
# Converting response variable to categorical
sub5['brst']=sub5['brst'].astype('category')
# Cross tabulating the response variable with explantory variable
ct1=pd.crosstab(sub5['brst'],sub5['alco'])
#ct1=pd.crosstab(sub5['alco'],sub5['brst'])
print "Contigency Table"
print ct1
print "\n\n"
# axis=0 tells pandas to sum the values in each column
colsum=ct1.sum(axis=0)
colpct=ct1/colsum
print(colpct)
# Chi-Square
print('\n\nChi-square value, p value, expected counts')
cs1=scipy.stats.chi2_contingency(ct1)
print(cs1)
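# Added for readability (not in the original script): chi2_contingency returns
# (statistic, p-value, degrees of freedom, expected frequencies).
chi2_stat, p_val, dof, expected = cs1
print 'chi-square = %s, p = %s, dof = %s' % (chi2_stat, p_val, dof)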
sub5['brst']=sub5['brst'].astype('category')
sub5['alco']=sub5['alco'].convert_objects(convert_numeric=True)
#sns.factorplot(x='alcconsumption', y='breastcancerper100th', data=sub5, kind="bar", ci=None)
sns.factorplot(x='alco', y='brst', data=sub5, kind="bar",ci=None)
plt.xlabel("Alcohol consumption in Liters")
plt.ylabel("Breast Cancer cases per 100th women")
# ====================================================
# POST HOC COMPARISON TEST
recode2={'1-20':1,'21-40':2}
sub5['COMP1v2']=sub5['brst'].map(recode2)
ct2=pd.crosstab(sub5['brst'],sub5['COMP1v2'])
print "Contigency Table -2\n"
print ct2
print "\n\n"
# axis=0 tells pandas to sum the values in each column
colsum=ct2.sum(axis=0)
colpct=ct2/colsum
print(colpct)
# Chi-Square
print('\n\nChi-square value, p value, expected counts')
cs2=scipy.stats.chi2_contingency(ct2)
print(cs2)
#######################################################
recode3={'41-60':3,'61-80':4}
sub5['COMP1v3']=sub5['brst'].map(recode3)
ct3=pd.crosstab(sub5['brst'],sub5['COMP1v3'])
print "Contigency Table - 3\n"
print ct3
print "\n\n"
# axis=0 tells pandas to sum the values in each column
colsum=ct3.sum(axis=0)
colpct=ct3/colsum
print(colpct)
# Chi-Square
print('\n\nChi-square value, p value, expected counts')
cs3=scipy.stats.chi2_contingency(ct3)
print(cs3)
| mit |
pp-mo/iris | docs/iris/src/userguide/regridding_plots/regridded_to_global_area_weighted.py | 5 | 1493 | import iris
import iris.analysis
import iris.plot as iplt
import matplotlib.pyplot as plt
import matplotlib.colors
import numpy as np
global_air_temp = iris.load_cube(iris.sample_data_path("air_temp.pp"))
regional_ash = iris.load_cube(iris.sample_data_path("NAME_output.txt"))
regional_ash = regional_ash.collapsed("flight_level", iris.analysis.SUM)
# Mask values so low that they are anomalous.
regional_ash.data = np.ma.masked_less(regional_ash.data, 5e-6)
norm = matplotlib.colors.LogNorm(5e-6, 0.0175)
global_air_temp.coord("longitude").guess_bounds()
global_air_temp.coord("latitude").guess_bounds()
fig = plt.figure(figsize=(8, 4.5))
plt.subplot(2, 2, 1)
iplt.pcolormesh(regional_ash, norm=norm)
plt.title("Volcanic ash total\nconcentration not regridded", size="medium")
for subplot_num, mdtol in zip([2, 3, 4], [0, 0.5, 1]):
plt.subplot(2, 2, subplot_num)
scheme = iris.analysis.AreaWeighted(mdtol=mdtol)
global_ash = regional_ash.regrid(global_air_temp, scheme)
iplt.pcolormesh(global_ash, norm=norm)
plt.title(
"Volcanic ash total concentration\n"
"regridded with AreaWeighted(mdtol={})".format(mdtol),
size="medium",
)
plt.subplots_adjust(
hspace=0, wspace=0.05, left=0.001, right=0.999, bottom=0, top=0.955
)
# Iterate over each of the figure's axes, adding coastlines, gridlines
# and setting the extent.
for ax in fig.axes:
ax.coastlines("50m")
ax.gridlines()
ax.set_extent([-80, 40, 31, 75])
plt.show()
| lgpl-3.0 |
nomadcube/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
    det : float
        Log-determinant of the robust covariance estimate.
    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.
    dist : array-like, shape (n_samples,)
        Mahalanobis distances of all observations, computed with the
        returned location and covariance estimates.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates and random subsets before
pooling them into a larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.
    dist : array-like, shape (n_samples,)
        Mahalanobis distances of the observations, computed with the
        raw robust estimates of location and covariance.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal)
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
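    Examples
    --------
    A minimal usage sketch (added for illustration):
    >>> import numpy as np
    >>> from sklearn.covariance import MinCovDet
    >>> rng = np.random.RandomState(0)
    >>> X = rng.multivariate_normal(mean=[0, 0],
    ...                             cov=[[1, .3], [.3, 1]], size=500)
    >>> mcd = MinCovDet(random_state=0).fit(X)
    >>> mcd.location_.shape, mcd.covariance_.shape
    ((2,), (2, 2))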
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
| bsd-3-clause |
berkeley-stat222/mousestyles | mousestyles/dynamics/tests/test_dynamics.py | 3 | 9981 | from __future__ import (absolute_import, division,
print_function, unicode_literals)
import pytest
import numpy as np
import pandas as pd
from mousestyles.dynamics import (create_time_matrix,
get_prob_matrix_list,
get_prob_matrix_small_interval,
mcmc_simulation, get_score,
find_best_interval)
def test_creat_time_matrix_input():
# checking functions raise the correct errors for wrong input
# time_gap is zeros
err_string = "time_gap should be nonnegative int or float"
with pytest.raises(ValueError) as excinfo:
create_time_matrix(combined_gap=4, time_gap=0, days_index=137)
assert excinfo.value.args[0] == err_string
# time_gap is negative
with pytest.raises(ValueError) as excinfo:
create_time_matrix(combined_gap=4, time_gap=-1, days_index=137)
assert excinfo.value.args[0] == err_string
    # combined_gap is negative value
err_string = "combined_gap should be nonnegative int or float"
with pytest.raises(ValueError) as excinfo:
create_time_matrix(combined_gap=-1, time_gap=1, days_index=137)
assert excinfo.value.args[0] == err_string
    # days_index is negative value
with pytest.raises(ValueError) as excinfo:
create_time_matrix(combined_gap=4, time_gap=1, days_index=-1)
assert excinfo.value.args[0] == "days_index should be nonnegative int"
# days_index is float value
with pytest.raises(ValueError) as excinfo:
create_time_matrix(combined_gap=4, time_gap=1, days_index=0.1)
assert excinfo.value.args[0] == "days_index should be nonnegative int"
def test_creat_time_matrix():
# Checking functions output the correct time matrix
matrix = create_time_matrix(combined_gap=4, time_gap=1, days_index=0)
assert matrix.iloc[0, 2181] == 1.0
def test_get_prob_matrix_list_input():
# checking functions raise the correct errors for wrong input
# time_df is not DataFrame
with pytest.raises(ValueError) as excinfo:
get_prob_matrix_list(time_df=5, interval_length=1000)
assert excinfo.value.args[0] == "time_df should be pandas DataFrame"
# interval_length is 0
row_i = np.hstack((np.zeros(13), np.ones(10),
np.ones(10) * 2, np.ones(10) * 3))
time_df_eg = np.vstack((row_i, row_i, row_i))
time_df_eg = pd.DataFrame(time_df_eg)
with pytest.raises(ValueError) as excinfo:
get_prob_matrix_list(time_df=time_df_eg, interval_length=0)
assert excinfo.value.args[0] == "interval_length should be positive int"
# interval_length is not int
with pytest.raises(ValueError) as excinfo:
get_prob_matrix_list(time_df=time_df_eg, interval_length=0.5)
assert excinfo.value.args[0] == "interval_length should be positive int"
def test_get_prob_matrix_list():
# Checking functions output the correct matrix list
row_i = np.hstack((np.zeros(13), np.ones(10),
np.ones(10) * 2, np.ones(10) * 3))
time_df_eg = np.vstack((row_i, row_i, row_i))
time_df_eg = pd.DataFrame(time_df_eg)
mat_list = get_prob_matrix_list(time_df_eg,
interval_length=10)
assert mat_list[0][0, 0] == 1.
assert sum(sum(mat_list[0])) == 1.
def test_get_prob_matrix_small_interval_input():
# checking functions raise the correct errors for wrong input
# string_list is not list
with pytest.raises(ValueError) as excinfo:
get_prob_matrix_small_interval(string_list=np.array([1, 2]))
assert excinfo.value.args[0] == "string_list should be a list"
# items in string_list is not string
time_list = [0, 1, 2]
with pytest.raises(ValueError) as excinfo:
get_prob_matrix_small_interval(string_list=time_list)
assert excinfo.value.args[0] == "items in string_list should be str"
def test_get_prob_matrix_small_interval():
# Checking functions output the correct matrix
time_list = ['002', '001', '012']
example = get_prob_matrix_small_interval(time_list)
assert example[0, 0] == 0.4
assert example[0, 1] == 0.4
assert example[0, 2] == 0.2
assert example[1, 2] == 1.
assert sum(example[0, :]) == 1.
def test_mcmc_simulation_input():
# checking functions raise the correct errors for wrong input
# mat_list is not list
with pytest.raises(ValueError) as excinfo:
mcmc_simulation(mat_list=np.array([1, 2]), n_per_int=10)
assert excinfo.value.args[0] == "mat_list should be a list"
# items in mat_list is not string
time_list = [0, 1, 2]
with pytest.raises(ValueError) as excinfo:
mcmc_simulation(mat_list=time_list, n_per_int=10)
assert excinfo.value.args[0] == "items in mat_list should be numpy array"
# n_per_int is not integer
mat0 = np.zeros(16).reshape(4, 4)
np.fill_diagonal(mat0, val=1)
mat1 = np.zeros(16).reshape(4, 4)
mat1[0, 1] = 1
mat1[1, 0] = 1
mat1[2, 2] = 1
mat1[3, 3] = 1
mat_list_example = [mat0, mat1]
with pytest.raises(ValueError) as excinfo:
mcmc_simulation(mat_list=mat_list_example, n_per_int=0.5)
assert excinfo.value.args[0] == "n_per_int should be positive int"
# n_per_int negative integer
with pytest.raises(ValueError) as excinfo:
mcmc_simulation(mat_list=mat_list_example, n_per_int=-1)
assert excinfo.value.args[0] == "n_per_int should be positive int"
def test_mcmc_simulation():
# Checking functions output the array
mat0 = np.zeros(16).reshape(4, 4)
np.fill_diagonal(mat0, val=1)
mat1 = np.zeros(16).reshape(4, 4)
mat1[0, 1] = 1
mat1[1, 0] = 1
mat1[2, 2] = 1
mat1[3, 3] = 1
mat_list_example = [mat0, mat1]
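    # mat0 is the identity transition matrix (every state stays put), while
    # mat1 deterministically swaps states 0 and 1 and leaves states 2 and 3
    # fixed, so the simulated chain is fully predictable in both intervals.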
example = mcmc_simulation(mat_list_example, 10)
assert sum(example[:10]) == 0.
assert sum(example[10:]) == 5.
assert example[10] == 1.
assert example[11] == 0.
def test_get_score_input():
# checking functions raise the correct errors for wrong input
# true_day is not numpy.array
with pytest.raises(ValueError) as excinfo:
get_score(true_day=0, simulated_day=np.zeros(13))
assert excinfo.value.args[0] == "true_day should be numpy array"
# simulated_day is not numpy.array
with pytest.raises(ValueError) as excinfo:
get_score(true_day=np.zeros(13), simulated_day=0)
assert excinfo.value.args[0] == "simulated_day should be numpy array"
# weight should be list
with pytest.raises(ValueError) as excinfo:
get_score(true_day=np.zeros(13), simulated_day=np.zeros(13),
weight=0)
assert excinfo.value.args[0] == "weight should be list"
# length of weight should be exactly 4
with pytest.raises(ValueError) as excinfo:
get_score(true_day=np.zeros(13), simulated_day=np.zeros(13),
weight=[0])
assert excinfo.value.args[0] == "Length of weight should be 4"
# check lengths of true_day and simulated_day
with pytest.raises(ValueError) as excinfo:
get_score(true_day=np.zeros(13), simulated_day=np.zeros(5))
error_message = "Length of simulated_day is smaller than true_day"
assert excinfo.value.args[0] == error_message
# check all the weights are positive
with pytest.raises(ValueError) as excinfo:
get_score(true_day=np.zeros(13), simulated_day=np.zeros(13),
weight=[-1, 2, 3, 4])
assert excinfo.value.args[0] == "All the weights should be positive"
def test_get_score():
# Checking functions output the correct score
true_day_1 = np.zeros(13)
simulated_day_1 = np.ones(13)
score_1 = get_score(true_day_1, simulated_day_1)
true_day_2 = np.ones(13)
simulated_day_2 = np.ones(13)
score_2 = get_score(true_day_2, simulated_day_2)
assert score_1 == 0.0
assert score_2 == 10.0
def test_find_best_interval_input():
# checking functions raise the correct errors for wrong input
# time_df is not DataFrame
with pytest.raises(ValueError) as excinfo:
find_best_interval(df=5, strain_num=2)
assert excinfo.value.args[0] == "df should be pandas DataFrame"
# strain_num is not integer in 0,1,2
row_i = np.hstack((np.zeros(13), np.ones(10),
np.ones(10) * 2, np.ones(10) * 3))
time_df_eg = np.vstack((row_i, row_i, row_i))
time_df_eg = pd.DataFrame(time_df_eg)
time_df_eg.rename(columns={0: 'strain'}, inplace=True)
with pytest.raises(ValueError) as excinfo:
find_best_interval(df=time_df_eg, strain_num=3)
assert excinfo.value.args[0] == "strain_num can only be 0, 1, 2"
# interval_length_initial is a numpy array with positive integers
with pytest.raises(ValueError) as excinfo:
find_best_interval(df=time_df_eg, strain_num=0,
interval_length_initial=3)
assert excinfo.value.args[0] == "interval_length_initial positive np.array"
with pytest.raises(ValueError) as excinfo:
find_best_interval(df=time_df_eg, strain_num=0,
interval_length_initial=np.array([1, 2, -1]))
assert excinfo.value.args[0] == "interval_length_initial positive np.array"
with pytest.raises(ValueError) as excinfo:
find_best_interval(df=time_df_eg, strain_num=0,
interval_length_initial=np.array([1, 2, 3.1]))
assert excinfo.value.args[0] == "interval_length_initial positive np.array"
def test_find_best_interval():
row_i = np.hstack((np.zeros(40)))
time_df_eg = np.vstack((row_i, row_i, row_i))
time_df_eg = pd.DataFrame(time_df_eg)
time_df_eg.rename(columns={0: 'strain'}, inplace=True)
time, fake, score = find_best_interval(time_df_eg, 0,
np.arange(10, 40, 10))
assert time == 10
assert np.array_equal(fake, np.zeros(40))
assert 1 - score < 0.05
| bsd-2-clause |
jostep/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 2 | 9909 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename,
local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename +
'. Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
random.shuffle(context_words)
words_to_use = collections.deque(context_words)
for j in range(num_skips):
batch[i * num_skips + j] = buffer[skip_window]
context_word = words_to_use.pop()
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
      # deque does not support slice assignment; with maxlen=span, extend()
      # replaces the buffer contents with the first span words
      buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
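# Editor's note (illustrative, not from the original tutorial): with skip_window=1
# and num_skips=2, every target word is paired with both of its immediate
# neighbours, so the demo below prints pairs such as (originated -> anarchism)
# and (originated -> as) for the beginning of the text8 corpus.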
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run())
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
| apache-2.0 |
tszabo-ro/paparazzi | sw/tools/tcp_aircraft_server/phoenix/__init__.py | 86 | 4470 | #Copyright 2014, Antoine Drouin
"""
Phoenix is a Python library for interacting with Paparazzi
"""
import math
"""
Unit convertions
"""
def rad_of_deg(d): return d/180.*math.pi
def deg_of_rad(r): return r*180./math.pi
def rps_of_rpm(r): return r*2.*math.pi/60.
def rpm_of_rps(r): return r/2./math.pi*60.
def m_of_inch(i): return i*0.0254
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
my_title_spec = {'color' : 'k', 'fontsize' : 20 }
def save_if(filename):
if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
if fig == None:
fig = plt.figure(figsize=figsize)
# else:
# plt.figure(fig.number)
if margins:
left, bottom, right, top, wspace, hspace = margins
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
hspace=hspace, wspace=wspace)
if window_title:
fig.canvas.set_window_title(window_title)
return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
if xlab:
ax.xaxis.set_label_text(xlab)
if ylab:
ax.yaxis.set_label_text(ylab)
if title:
ax.set_title(title, my_title_spec)
if legend <> None:
ax.legend(legend, loc='best')
if xlim <> None:
ax.set_xlim(xlim[0], xlim[1])
if ylim <> None:
ax.set_ylim(ylim[0], ylim[1])
"""
Messages
"""
#: dictionary mapping the C type to its length in bytes (e.g char -> 1)
TYPE_TO_LENGTH_MAP = {
"char" : 1,
"uint8" : 1,
"int8" : 1,
"uint16" : 2,
"int16" : 2,
"uint32" : 4,
"int32" : 4,
"float" : 4,
"double" : 8,
}
#: dictionary mapping the C type to correct format string
TYPE_TO_PRINT_MAP = {
float : "%f",
str : "%s",
chr : "%c",
int : "%d"
}
ACID_ALL = 0xFF
ACID_TEST = 0xFE
ACID_GROUNDSTATION = 0xFD
#: dictionary mapping debug types to format characters
DEBUG_MESSAGES = {
"DEBUG_UINT8" : "%d",
"DEBUG_INT32" : "%d",
"DEBUG_FLOAT" : "%#f"
}
"""
Binary logs
See format description in sw/airborne/subsystems/datalink/fms_link.c
"""
import struct
def hex_of_bin(b): return ' '.join( [ "%02X" % ord( x ) for x in b ] )
import pdb
def read_binary_log(filename, tick_freq = 2*512.):
f = open(filename, "rb")
d = f.read()
packet_header_len = 6
msg_header_len = 2
def read_packet(d, packet_start):
payload_start = packet_start+packet_header_len
timestamp, payload_len = struct.unpack("IH", d[packet_start:payload_start])
msgs = read_packet_payload(d, payload_start, payload_len)
next_packet = payload_start+payload_len+2
return timestamp, msgs, next_packet
def read_packet_payload(d, s, l):
msgs = []
packet_end = s+l; msg_start = s
while msg_start<packet_end:
payload_start = msg_start+msg_header_len
msg_len, msg_id = struct.unpack("BB", d[msg_start:payload_start])
payload_end = payload_start+msg_len
msg_payload = d[payload_start:payload_end]
msgs.append([msg_id, msg_payload])
#print msg_id, msg_len, hex_of_bin(msg_payload)
msg_start = payload_end
return msgs
packets = []
packet_start=0
while packet_start<len(d):
timestamp, msgs, next_packet = read_packet(d, packet_start)
packets.append([timestamp/tick_freq, msgs])
#print timestamp, msgs
packet_start = next_packet
f.close()
return packets
def extract_from_binary_log(protocol, packets, msg_names, t_min=None, t_max=None):
ret = [{'time':[], 'data':[]} for m in msg_names]
if t_min == None: t_min = packets[0][0]
if t_max == None: t_max = packets[-1][0]
for t, msgs in packets:
if t>= t_min and t<= t_max:
for id, payload in msgs:
m = protocol.get_message_by_id('telemetry', id)
                try: i = msg_names.index(m.name)
                except ValueError: continue  # skip messages that were not requested
                ret[i]['time'].append(t); ret[i]['data'].append(m.unpack_scaled_values(payload))
return ret
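# Illustrative usage (editor's sketch; the protocol object, message name and log
# path below are placeholders, not provided by this module):
#   packets = read_binary_log('flight_log.bin')
#   (att,) = extract_from_binary_log(protocol, packets, ['ATTITUDE'])
#   plt.plot(att['time'], [v[0] for v in att['data']])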
| gpl-2.0 |
stonneau/cwc_tests | src/tools/plot_utils.py | 2 | 11856 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 16 09:16:56 2015
@author: adelpret
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
DEFAULT_FONT_SIZE = 40;
DEFAULT_AXIS_FONT_SIZE = DEFAULT_FONT_SIZE;
DEFAULT_LINE_WIDTH = 8; #13;
DEFAULT_MARKER_SIZE = 6;
DEFAULT_FONT_FAMILY = 'sans-serif'
DEFAULT_FONT_SIZE = DEFAULT_FONT_SIZE;
DEFAULT_FONT_SERIF = ['Times New Roman', 'Times','Bitstream Vera Serif', 'DejaVu Serif', 'New Century Schoolbook', 'Century Schoolbook L', 'Utopia', 'ITC Bookman', 'Bookman', 'Nimbus Roman No9 L', 'Palatino', 'Charter', 'serif'];
DEFAULT_FIGURE_FACE_COLOR = 'white' # figure facecolor; 0.75 is scalar gray
DEFAULT_LEGEND_FONT_SIZE = DEFAULT_FONT_SIZE;
DEFAULT_AXES_LABEL_SIZE = DEFAULT_FONT_SIZE; # fontsize of the x any y labels
DEFAULT_TEXT_USE_TEX = True;
LINE_ALPHA = 0.9;
SAVE_FIGURES = False;
FILE_EXTENSIONS = ['png']; #,'eps'];
FIGURES_DPI = 150;
SHOW_LEGENDS = False;
LEGEND_ALPHA = 0.5;
SHOW_FIGURES = False;
FIGURE_PATH = './';
LINE_WIDTH_RED = 0; # reduction of line width when plotting multiple lines on same plot
LINE_WIDTH_MIN = 1;
BOUNDS_COLOR = 'silver';
#legend.framealpha : 1.0 # opacity of of legend frame
#axes.hold : True # whether to clear the axes by default on
#axes.linewidth : 1.0 # edge linewidth
#axes.titlesize : large # fontsize of the axes title
#axes.color_cycle : b, g, r, c, m, y, k # color cycle for plot lines
#xtick.labelsize : medium # fontsize of the tick labels
#figure.dpi : 80 # figure dots per inch
#image.cmap : jet # gray | jet etc...
#savefig.dpi : 100 # figure dots per inch
#savefig.facecolor : white # figure facecolor when saving
#savefig.edgecolor : white # figure edgecolor when saving
#savefig.format : png # png, ps, pdf, svg
#savefig.jpeg_quality: 95 # when a jpeg is saved, the default quality parameter.
#savefig.directory : ~ # default directory in savefig dialog box,
# leave empty to always use current working directory
def create_empty_figure(nRows=1, nCols=1, spinesPos=None,sharex=True):
f, ax = plt.subplots(nRows,nCols,sharex=sharex);
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(50,50,1080,720);
if(spinesPos!=None):
if(nRows*nCols>1):
for axis in ax.reshape(nRows*nCols):
movePlotSpines(axis, spinesPos);
else:
movePlotSpines(ax, spinesPos);
return (f, ax);
def movePlotSpines(ax, spinesPos):
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',spinesPos[0]))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',spinesPos[1]))
def setAxisFontSize(ax, size):
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(size)
label.set_bbox(dict(facecolor='white', edgecolor='None', alpha=0.65))
mpl.rcdefaults()
mpl.rcParams['lines.linewidth'] = DEFAULT_LINE_WIDTH;
mpl.rcParams['lines.markersize'] = DEFAULT_MARKER_SIZE;
mpl.rcParams['font.family'] = DEFAULT_FONT_FAMILY;
mpl.rcParams['font.size'] = DEFAULT_FONT_SIZE;
mpl.rcParams['font.serif'] = DEFAULT_FONT_SERIF;
mpl.rcParams['text.usetex'] = DEFAULT_TEXT_USE_TEX;
mpl.rcParams['axes.labelsize'] = DEFAULT_AXES_LABEL_SIZE;
mpl.rcParams['legend.fontsize'] = DEFAULT_LEGEND_FONT_SIZE;
mpl.rcParams['figure.facecolor'] = DEFAULT_FIGURE_FACE_COLOR;
mpl.rcParams['figure.figsize'] = 12, 9 #23, 12 #
def plot3dQuantity(quantity, title, ax=None, boundUp=None, boundLow=None, yscale='linear', linestyle='k'):
return plotNdQuantity(3, 1, quantity, title, ax, boundUp, boundLow, yscale, linestyle);
def plotNdQuantity(nRows, nCols, quantity, title="", ax=None, boundUp=None, boundLow=None, yscale='linear',
linestyle='k--', sharey=False, margins=None):
t = quantity.shape[0];
n = quantity.shape[1];
if(margins!=None):
if(type(margins) is list):
margins = [margins[0].reshape(t,1,n), margins[1].reshape(t,1,n)];
else:
margins = margins.reshape(t,1,n);
return plotNdQuantityPerSolver(nRows, nCols, quantity.reshape(t,1,n), title, None, [linestyle], ax,
boundUp, boundLow, yscale, None, None, sharey, margins);
def plotNdQuantityPerSolver(nRows, nCols, quantity, title, solver_names, line_styles, ax=None, boundUp=None, boundLow=None,
yscale='linear', subplot_titles=None, ylabels=None, sharey=False, margins=None, x=None):
if(ax==None):
f, ax = plt.subplots(nRows, nCols, sharex=True, sharey=sharey);
ax = ax.reshape(nRows, nCols);
k = 0;
if(x==None):
x = range(quantity.shape[0]);
for j in range(nCols):
for i in range(nRows):
if(k<quantity.shape[2]):
if(subplot_titles!=None):
ax[i,j].set_title(subplot_titles[k]);
elif(i==0):
ax[i,j].set_title(str(k)); # set titles on first row only
if(ylabels!=None):
ax[i,j].set_ylabel(ylabels[k]);
ymin = np.min(quantity[:,:,k]);
ymax = np.max(quantity[:,:,k]);
if(boundUp!=None):
if(len(boundUp.shape)==1): # constant bound
if(boundUp[k]<2*ymax):
ymax = np.max([ymax,boundUp[k]]);
ax[i,j].plot([0, quantity.shape[0]-1], [boundUp[k], boundUp[k]], '--', color=BOUNDS_COLOR, alpha=LINE_ALPHA);
elif(len(boundUp.shape)==2): # bound variable in time but constant for each solver
if(np.max(boundUp[:,k])<2*ymax):
ymax = np.max(np.concatenate(([ymax],boundUp[:,k])));
ax[i,j].plot(boundUp[:,k], '--', color=BOUNDS_COLOR, label='Upper bound', alpha=LINE_ALPHA);
if(boundLow!=None):
if(len(boundLow.shape)==1):
if(boundLow[k]>2*ymin):
ymin = np.min([ymin,boundLow[k]]);
ax[i,j].plot([0, quantity.shape[0]-1], [boundLow[k], boundLow[k]], '--', color=BOUNDS_COLOR, alpha=LINE_ALPHA);
else:
if(np.min(boundLow[:,k])>2*ymin):
ymin = np.min(np.concatenate(([ymin],boundLow[:,k])));
ax[i,j].plot(boundLow[:,k], '--', color=BOUNDS_COLOR, label='Lower bound', alpha=LINE_ALPHA);
lw = DEFAULT_LINE_WIDTH;
for s in range(quantity.shape[1]):
p, = ax[i,j].plot(x, quantity[:,s,k], line_styles[s], alpha=LINE_ALPHA, linewidth=lw);
if(margins!=None):
if(type(margins) is list):
mp = margins[0];
mn = margins[1];
else:
mp = margins;
mn = margins;
ymax = np.max(np.concatenate(([ymax],quantity[:,s,k]+mp[:,s,k])));
ymin = np.min(np.concatenate(([ymin],quantity[:,s,k]-mn[:,s,k])));
ax[i,j].fill_between(x, quantity[:,s,k]+mp[:,s,k], quantity[:,s,k]-mn[:,s,k], alpha=0.15, linewidth=0, facecolor='green');
if(solver_names!=None):
p.set_label(solver_names[s]);
lw=max(LINE_WIDTH_MIN,lw-LINE_WIDTH_RED);
ax[i,j].set_yscale(yscale);
ax[i,j].xaxis.set_ticks(np.arange(0, x[-1], x[-1]/2));
ax[i,j].yaxis.set_ticks([ymin, ymax]);
if(ymax-ymin>5.0):
ax[i,j].yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.0f'));
elif(ymax-ymin>0.5):
ax[i,j].yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'));
else:
ax[i,j].yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f'));
if(sharey==False):
ax[i,j].set_ylim([ymin-0.1*(ymax-ymin), ymax+0.1*(ymax-ymin)]);
k += 1;
else:
ax[i,j].yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.0f'));
if(SAVE_FIGURES):
for ext in FILE_EXTENSIONS:
plt.gcf().savefig(FIGURE_PATH+title.replace(' ', '_')+'.'+ext, format=ext, dpi=FIGURES_DPI, bbox_inches='tight');
else:
ax[nRows/2,0].set_ylabel(title);
if(SHOW_LEGENDS):
# leg = ax[0,0].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
leg = ax[0,0].legend(loc='best');
# leg.get_frame().set_alpha(LEGEND_ALPHA)
return ax;
def plotQuantityPerSolver(quantity, title, solver_names, line_styles, yscale='linear', ylabel='',
x=None, xlabel='', legend_location='best'):
f, ax = plt.subplots();
lw = DEFAULT_LINE_WIDTH;
if(x==None):
x = range(quantity.shape[0]);
for i in range(len(solver_names)):
ax.plot(x, quantity[:,i], line_styles[i], alpha=LINE_ALPHA, linewidth=lw);
lw=max(lw-LINE_WIDTH_RED,LINE_WIDTH_MIN);
ax.set_yscale(yscale);
ax.set_ylabel(ylabel);
ax.set_xlabel(xlabel);
ymin = np.min(quantity);
ymax = np.max(quantity);
ax.set_ylim([ymin-0.1*(ymax-ymin), ymax+0.1*(ymax-ymin)]);
if(SHOW_LEGENDS):
leg = ax.legend(solver_names, loc=legend_location);
leg.get_frame().set_alpha(LEGEND_ALPHA)
if(SAVE_FIGURES):
for ext in FILE_EXTENSIONS:
plt.gcf().savefig(FIGURE_PATH+title.replace(' ', '_')+'.'+ext, format=ext, dpi=FIGURES_DPI, bbox_inches='tight');
elif(ylabel==''):
ax.set_ylabel(title);
def plotQuantityVsQuantityPerSolver(quantity, quantityPerSolver, legend, solver_names, line_styles, yscale='linear'):
r=0;
c=0;
if(len(solver_names)==4 or len(solver_names)==3):
r=2;
c=2;
elif(len(solver_names)==5 or len(solver_names)==6):
r=2;
c=3;
else:
print "ERROR in plotQuantityVsQuantityPerSolver, number of solvers not managed";
return;
f, ax = plt.subplots(r, c, sharex=True, sharey=True);
for i in range(len(solver_names)):
ax[i/c,i%c].plot(quantity[:,i], 'kx-', quantityPerSolver[:,i], line_styles[i], alpha=LINE_ALPHA);
ax[i/c,i%c].set_ylabel(solver_names[i]);
ax[i/c,i%c].set_yscale(yscale);
if(SAVE_FIGURES):
for ext in FILE_EXTENSIONS:
f.savefig(FIGURE_PATH+(legend[0]+'_VS_'+legend[1]).replace(' ', '_')+'.'+ext, format=ext, dpi=FIGURES_DPI, bbox_inches='tight');
if(SHOW_LEGENDS):
leg = ax[0,0].legend(legend, loc='best');
leg.get_frame().set_alpha(LEGEND_ALPHA)
def grayify_cmap(cmap):
"""Return a grayscale version of the colormap"""
cmap = plt.cm.get_cmap(cmap)
colors = cmap(np.arange(cmap.N))
# convert RGBA to perceived greyscale luminance
# cf. http://alienryderflex.com/hsp.html
RGB_weight = [0.299, 0.587, 0.114]
luminance = np.sqrt(np.dot(colors[:, :3] ** 2, RGB_weight))
colors[:, :3] = luminance[:, np.newaxis]
return cmap.from_list(cmap.name + "_grayscale", colors, cmap.N)
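# Illustrative usage (editor's sketch): the grayscale variant can be passed
# anywhere matplotlib expects a colormap, e.g.
#   plt.imshow(np.random.rand(10, 10), cmap=grayify_cmap('jet'))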
def saveFigure(title):
if(SAVE_FIGURES):
for ext in FILE_EXTENSIONS:
plt.gcf().savefig(FIGURE_PATH+title.replace(' ', '_')+'.'+ext, format=ext, dpi=FIGURES_DPI, bbox_inches='tight'); | gpl-3.0 |
sebp/scikit-survival | tests/test_stacking.py | 1 | 8545 | import numpy
from numpy.testing import assert_array_almost_equal
import pytest
from sklearn.base import BaseEstimator
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sksurv.linear_model import CoxPHSurvivalAnalysis
from sksurv.meta import MeanEstimator, Stacking
from sksurv.svm import FastSurvivalSVM
from sksurv.testing import assert_cindex_almost_equal
class _NoFitEstimator(BaseEstimator):
pass
class _NoPredictDummy(BaseEstimator):
def fit(self, X, y):
pass
class _PredictDummy(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
pass
class _PredictProbaDummy(BaseEstimator):
def fit(self, X, y):
pass
def predict_proba(self, X):
pass
class TestStackingClassifier:
@staticmethod
@pytest.mark.parametrize('estimator', [_NoFitEstimator, _NoPredictDummy])
def test_base_estimator(estimator):
with pytest.raises(TypeError,
match=r"All base estimators should implement fit and predict/predict_proba (.+) doesn't"):
Stacking(_PredictDummy, [('m1', estimator)])
@staticmethod
def test_meta_no_fit():
with pytest.raises(TypeError,
match=r"meta estimator should implement fit (.+) doesn't"):
Stacking(_NoFitEstimator, [('m1', _PredictDummy)])
@staticmethod
def test_names_not_unique():
with pytest.raises(ValueError,
match=r"Names provided are not unique: \('m1', 'm2', 'm1'\)"):
Stacking(_NoFitEstimator,
[('m1', _PredictDummy), ('m2', _PredictDummy), ('m1', _PredictDummy)])
@staticmethod
def test_fit():
data = load_iris()
x = data["data"]
y = data["target"]
meta = Stacking(LogisticRegression(solver='liblinear', multi_class='ovr'),
[('tree', DecisionTreeClassifier(max_depth=1, random_state=0)),
('svm', SVC(probability=True, gamma='auto', random_state=0))])
assert 2 == len(meta)
meta.fit(x, y)
p = meta._predict_estimators(x)
assert (x.shape[0], 3 * 2) == p.shape
assert (3, 3 * 2) == meta.meta_estimator.coef_.shape
@staticmethod
def test_fit_sample_weights():
data = load_iris()
x = data["data"]
y = data["target"]
meta = Stacking(LogisticRegression(solver='liblinear', multi_class='ovr'),
[('tree', DecisionTreeClassifier(max_depth=1, random_state=0)),
('svm', SVC(probability=True, gamma='auto', random_state=0))])
sample_weight = numpy.random.RandomState(0).uniform(size=x.shape[0])
meta.fit(x, y, tree__sample_weight=sample_weight, svm__sample_weight=sample_weight)
@staticmethod
def test_set_params():
meta = Stacking(LogisticRegression(), [('tree', DecisionTreeClassifier(max_depth=1, random_state=0)),
('svm', SVC(probability=True, random_state=0))],
probabilities=True)
assert 2 == len(meta)
meta.set_params(tree__min_samples_split=7, svm__C=0.05)
assert 7 == meta.get_params()["tree__min_samples_split"]
assert 0.05 == meta.get_params()["svm__C"]
assert isinstance(meta.get_params()["meta_estimator"], LogisticRegression)
assert meta.get_params()["probabilities"]
meta.set_params(meta_estimator=DecisionTreeClassifier(), probabilities=False)
assert isinstance(meta.get_params()["meta_estimator"], DecisionTreeClassifier)
assert not meta.get_params()["probabilities"]
p = meta.get_params(deep=False)
assert set(p.keys()) == {"meta_estimator", "base_estimators", "probabilities"}
@staticmethod
def test_predict():
data = load_iris()
x = data["data"]
y = data["target"]
meta = Stacking(LogisticRegression(multi_class='multinomial', solver='lbfgs'),
[('tree', DecisionTreeClassifier(max_depth=1, random_state=0)),
('svm', SVC(probability=True, gamma='auto', random_state=0))])
assert 2 == len(meta)
meta.fit(x, y)
p = meta.predict(x)
acc = accuracy_score(y, p)
assert acc >= 0.98
@staticmethod
def test_predict_proba():
data = load_iris()
x = data["data"]
y = data["target"]
meta = Stacking(LogisticRegression(multi_class='multinomial', solver='lbfgs'),
[('tree', DecisionTreeClassifier(max_depth=1, random_state=0)),
('svm', SVC(probability=True, gamma='auto', random_state=0))])
meta.fit(x, y)
p = meta.predict_proba(x)
scores = numpy.empty(3)
for i, c in enumerate(meta.meta_estimator.classes_):
scores[i] = roc_auc_score(numpy.asarray(y == c, dtype=int), p[:, i])
assert_array_almost_equal(numpy.array([1.0, 0.9986, 0.9986]), scores)
@staticmethod
def test_predict_log_proba():
data = load_iris()
x = data["data"]
y = data["target"]
meta = Stacking(LogisticRegression(multi_class='multinomial', solver='lbfgs'),
[('tree', DecisionTreeClassifier(max_depth=1, random_state=0)),
('svm', SVC(probability=True, gamma='auto', random_state=0))])
meta.fit(x, y)
p = meta.predict_log_proba(x)
scores = numpy.empty(3)
for i, c in enumerate(meta.meta_estimator.classes_):
scores[i] = roc_auc_score(numpy.asarray(y == c, dtype=int), p[:, i])
assert_array_almost_equal(numpy.array([1.0, 0.9986, 0.9986]), scores)
class TestStackingSurvivalAnalysis:
@staticmethod
def test_fit(make_whas500):
whas500 = make_whas500(with_mean=False, with_std=False, to_numeric=True)
meta = Stacking(MeanEstimator(),
[('coxph', CoxPHSurvivalAnalysis()),
('svm', FastSurvivalSVM(random_state=0))],
probabilities=False)
assert 2 == len(meta)
meta.fit(whas500.x, whas500.y)
p = meta._predict_estimators(whas500.x)
assert (whas500.x.shape[0], 2) == p.shape
@staticmethod
def test_set_params():
meta = Stacking(_PredictDummy(),
[('coxph', CoxPHSurvivalAnalysis()),
('svm', FastSurvivalSVM(random_state=0))],
probabilities=False)
meta.set_params(coxph__alpha=1.0, svm__alpha=0.4132)
assert 1.0 == meta.get_params()["coxph__alpha"]
assert 0.4132 == meta.get_params()["svm__alpha"]
@staticmethod
def test_predict(make_whas500):
whas500 = make_whas500(with_mean=False, with_std=False, to_numeric=True)
meta = Stacking(MeanEstimator(),
[('coxph', CoxPHSurvivalAnalysis()),
('svm', FastSurvivalSVM(random_state=0))],
probabilities=False)
meta.fit(whas500.x, whas500.y)
# result is different if randomForestSRC has not been compiled with OpenMP support
p = meta.predict(whas500.x)
assert_cindex_almost_equal(whas500.y['fstat'], whas500.y['lenfol'], p,
(0.7848807, 58983, 16166, 0, 14))
@staticmethod
def test_predict_proba():
meta = Stacking(_PredictDummy(),
[('coxph', CoxPHSurvivalAnalysis()),
('svm', FastSurvivalSVM(random_state=0))],
probabilities=False)
with pytest.raises(AttributeError,
match="'_PredictDummy' object has no attribute 'predict_proba'"):
meta.predict_proba # pylint: disable=pointless-statement
@staticmethod
def test_score(make_whas500):
whas500 = make_whas500(with_mean=False, with_std=False, to_numeric=True)
meta = Stacking(MeanEstimator(),
[('coxph', CoxPHSurvivalAnalysis()),
('svm', FastSurvivalSVM(random_state=0))],
probabilities=False)
meta.fit(whas500.x, whas500.y)
c_index = meta.score(whas500.x, whas500.y)
assert round(abs(c_index - 0.7848807), 5) == 0
| gpl-3.0 |
mabelcalim/Taylor_diagram | lib/taylor_diag.py | 1 | 4099 | #!/usr/bin/python
# _*_ coding: latin-1 -*-
# Taylor Diagram - Based on Taylor (2001) - Journal Geophysical Research
# author: Mabel Calim Costa
# GMAO - INPE
# 20/02/2018
import numpy as np
from numpy import ma
import mpl_toolkits.axisartist.grid_finder as GF
import mpl_toolkits.axisartist.floating_axes as FA
import matplotlib.pyplot as plt
import netCDF4
from matplotlib.projections import PolarAxes
def load_nc(file,var):
"""
Open ARCHIVE .nc
file = archive.nc
var = variable from archive.nc
"""
f = netCDF4.Dataset(file,'r+')
    data = f.variables[var][:]
f.close()
return data
def Taylor_diag(series,names):
""" Taylor Diagram : obs is reference data sample
in a full diagram (0 --> npi)
--------------------------------------------------------------------------
Input: series - dict with all time series (lists) to analyze
series[0] - is the observation, the reference by default.
"""
from matplotlib.projections import PolarAxes
corr,std ={},{}
for i in series.keys():
corr[i] = ma.corrcoef(series[0],series[i])[1,0]
std[i] = ma.std(series[i])/ma.std(series[0])
ref = 1# ma.std(series[0])
#print corr
rlocs = np.concatenate((np.arange(0,-10,-0.25),[-0.95,-0.99],np.arange(0,10,0.25),[0.95,0.99]))
str_rlocs = np.concatenate((np.arange(0,10,0.25),[0.95,0.99],np.arange(0,10,0.25),[0.95,0.99]))
tlocs = np.arccos(rlocs) # Conversion to polar angles
gl1 = GF.FixedLocator(tlocs) # Positions
tf1 = GF.DictFormatter(dict(zip(tlocs, map(str,rlocs))))
str_locs2 = np.arange(-10,11,0.5)
tlocs2 = np.arange(-10,11,0.5) # Conversion to polar angles
g22 = GF.FixedLocator(tlocs2)
tf2 = GF.DictFormatter(dict(zip(tlocs2, map(str,str_locs2))))
tr = PolarAxes.PolarTransform()
smin = 0
smax = 2.5
ghelper = FA.GridHelperCurveLinear(tr,
extremes=(0,np.pi, # 1st quadrant
smin,smax),
grid_locator1=gl1,
#grid_locator2=g11,
tick_formatter1=tf1,
tick_formatter2=tf2,
)
fig = plt.figure(figsize=(10,5), dpi=100)
ax = FA.FloatingSubplot(fig, 111, grid_helper=ghelper)
fig.add_subplot(ax)
ax.axis["top"].set_axis_direction("bottom")
ax.axis["top"].toggle(ticklabels=True, label=True)
ax.axis["top"].major_ticklabels.set_axis_direction("top")
ax.axis["top"].label.set_axis_direction("top")
ax.axis["top"].label.set_text("Correlation Coefficient")
ax.axis["left"].set_axis_direction("bottom")
ax.axis["left"].label.set_text("Standard Deviation")
ax.axis["right"].set_axis_direction("top")
ax.axis["right"].toggle(ticklabels=True, label=True)
ax.axis["right"].set_visible(True)
ax.axis["right"].major_ticklabels.set_axis_direction("bottom")
#ax.axis["right"].label.set_text("Standard Deviation")
ax.axis["bottom"].set_visible(False)
ax.grid(True)
ax = ax.get_aux_axes(tr)
t = np.linspace(0, np.pi)
r = np.zeros_like(t) + ref
ax.plot(t,r, 'k--', label='_')
rs,ts = np.meshgrid(np.linspace(smin,smax),
np.linspace(0,np.pi))
rms = np.sqrt(ref**2 + rs**2 - 2*ref*rs*np.cos(ts))
    CS = ax.contour(ts, rs, rms, cmap=plt.cm.bone)
plt.clabel(CS, inline=1, fontsize=10)
ax.plot(np.arccos(0.9999),ref,'k',marker='*',ls='', ms=10)
aux = range(1,len(corr))
#del aux[ref]
colors = plt.matplotlib.cm.jet(np.linspace(0,1,len(corr)))
for i in aux:
ax.plot(np.arccos(corr[i]), std[i],c=colors[i],alpha=0.7,marker='o',label="%s" %names[i])
ax.text(np.arccos(corr[i]), std[i],"%s"%i)
    plt.legend(bbox_to_anchor=(1.5, 1), prop=dict(size='large'), loc='best')
plt.savefig('example.png', dpi=500)
return
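# Minimal self-test (editor's sketch using synthetic series; for real data build
# the 'series' dict from load_nc() output instead):
if __name__ == '__main__':
    t = np.linspace(0, 2 * np.pi, 200)
    obs = np.sin(t)
    series = {0: obs,                              # reference (observation)
              1: obs + 0.2 * np.random.randn(200), # model 1
              2: 0.8 * np.sin(t + 0.1)}            # model 2
    names = {0: 'obs', 1: 'model 1', 2: 'model 2'}
    Taylor_diag(series, names)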
| lgpl-3.0 |
hippke/TTV-TDV-exomoons | create_figures/system_4.py | 1 | 7095 | """n-body simulator to derive TDV+TTV diagrams of planet-moon configurations.
Credit for part of the source is given to
https://github.com/akuchling/50-examples/blob/master/gravity.rst
Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
"""
import numpy
import math
import matplotlib.pylab as plt
from modified_turtle import Turtle
from phys_const import *
class Body(Turtle):
"""Subclass of Turtle representing a gravitationally-acting body"""
name = 'Body'
vx = vy = 0.0 # velocities in m/s
px = py = 0.0 # positions in m
def attraction(self, other):
"""(Body): (fx, fy) Returns the force exerted upon this body by the other body"""
# Distance of the other body
sx, sy = self.px, self.py
ox, oy = other.px, other.py
dx = (ox-sx)
dy = (oy-sy)
d = math.sqrt(dx**2 + dy**2)
# Force f and direction to the body
f = G * self.mass * other.mass / (d**2)
theta = math.atan2(dy, dx)
# direction of the force
fx = math.cos(theta) * f
fy = math.sin(theta) * f
return fx, fy
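        # Editor's note: for example, two 1 kg bodies 1 m apart attract each
        # other with |F| = G ~ 6.674e-11 N; theta splits that magnitude into
        # its x and y components.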
def loop(bodies, orbit_duration):
"""([Body]) Loops and updates the positions of all the provided bodies"""
# Calculate the duration of our simulation: One full orbit of the outer moon
seconds_per_day = 24*60*60
timesteps_per_day = 1000
timestep = seconds_per_day / timesteps_per_day
total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day)
#print total_steps, orbit_duration / 24 / 60 / 60
for body in bodies:
body.penup()
body.hideturtle()
for step in range(total_steps):
for body in bodies:
if body.name == 'planet':
# Add current position and velocity to our list
tdv_list.append(body.vx)
ttv_list.append(body.px)
force = {}
for body in bodies:
# Add up all of the forces exerted on 'body'
total_fx = total_fy = 0.0
for other in bodies:
# Don't calculate the body's attraction to itself
if body is other:
continue
fx, fy = body.attraction(other)
total_fx += fx
total_fy += fy
# Record the total force exerted
force[body] = (total_fx, total_fy)
# Update velocities based upon on the force
for body in bodies:
fx, fy = force[body]
body.vx += fx / body.mass * timestep
body.vy += fy / body.mass * timestep
# Update positions
body.px += body.vx * timestep
body.py += body.vy * timestep
#body.goto(body.px*SCALE, body.py*SCALE)
#body.dot(3)
def run_sim(R_star, transit_duration, bodies):
"""Run 3-body sim and convert results to TTV + TDV values in [minutes]"""
# Run 3-body sim for one full orbit of the outermost moon
loop(bodies, orbit_duration)
# Move resulting data from lists to numpy arrays
ttv_array = numpy.array([])
ttv_array = ttv_list
tdv_array = numpy.array([])
tdv_array = tdv_list
# Zeropoint correction
middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array)
ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
ttv_array = numpy.divide(ttv_array, 1000) # km/s
# Compensate for barycenter offset of planet at start of simulation:
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
ttv_array = numpy.divide(ttv_array, stretch_factor)
# Convert to time units, TTV
ttv_array = numpy.divide(ttv_array, R_star)
ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24) # minutes
# Convert to time units, TDV
oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60 # m/sec
newspeed = oldspeed - numpy.amax(tdv_array)
difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
conversion_factor = difference / numpy.amax(tdv_array)
tdv_array = numpy.multiply(tdv_array, conversion_factor)
return ttv_array, tdv_array
"""Main routine"""
# Set variables and constants. Do not change these!
G = 6.67428e-11 # Gravitational constant G
SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim
tdv_list = []
ttv_list = []
R_star = 6.96 * 10**5 # [km], solar radius
transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0
print transit_duration
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = a_io
secondmoon = Body()
secondmoon.mass = M_gan
secondmoon.px = a_gan
# Calculate start velocities
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *secondmoon.px ** 3) / (G * (secondmoon.mass + planet.mass)))
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon])
# Output information
print 'TTV amplitude =', numpy.amax(ttv_array), \
'[min] = ', numpy.amax(ttv_array) * 60, '[sec]'
print 'TDV amplitude =', numpy.amax(tdv_array), \
'[min] = ', numpy.amax(tdv_array) * 60, '[sec]'
ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.ylim([numpy.amin(tdv_array) * 1.2, numpy.amax(tdv_array) * 1.2])
plt.xlim([numpy.amin(ttv_array) * 1.2, numpy.amax(ttv_array) * 1.2])
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)
# Fix axes for comparisons
plt.xlim(-0.2, +0.2)
plt.ylim(-0.5, +0.5)
plt.annotate(r"4:1", xy=(-0.19, +0.42), size=16)
plt.savefig("fig_system_4.eps", bbox_inches = 'tight')
| mit |
ningchi/scikit-learn | sklearn/learning_curve.py | 13 | 13351 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import _check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that has been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(_check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
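# Illustrative usage (editor's sketch; the estimator, data and parameter grid are
# placeholders, not part of this module):
#   from sklearn.svm import SVC
#   from sklearn.datasets import load_digits
#   digits = load_digits()
#   sizes, train_scores, test_scores = learning_curve(
#       SVC(gamma=0.001), digits.data, digits.target,
#       train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
#   train_scores, valid_scores = validation_curve(
#       SVC(), digits.data, digits.target, param_name="gamma",
#       param_range=np.logspace(-6, -1, 5), cv=5)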
| bsd-3-clause |
s1na/magpie | magpie/nltk-solution.py | 1 | 4454 | # -*- coding: utf-8 -*-
from collections import Counter
import cPickle as pickle
import heapq
import nltk
import numpy as np
from sklearn.svm import SVC, LinearSVC
from sklearn.grid_search import RandomizedSearchCV
from hazm import Normalizer, Stemmer, word_tokenize
#from hazm.HamshahriReader import HamshahriReader
import config
from old_hamshahri_reader import OldHamshahriReader
def dimension_reduction(terms, dist):
return [term for term in set(terms) if len(term) > 4 and dist[term] > 40]
def doc_features(doc, dist_words):
words_set = set(doc['words'])
features = {}
for word in dist_words:
features['contains(%s)' % word] = (word in words_set)
return features
def evaluate(classifier, gold, labels):
accuracy, precision, recall = 0.0, 0.0, 0.0
confusion_matrix = np.zeros((len(labels), len(labels)))
results = classifier.batch_classify([fs for (fs,l) in gold])
for ((fs, l), r) in zip(gold, results):
confusion_matrix[labels.index(l), labels.index(r)] += 1
accuracy = confusion_matrix.diagonal().sum() / confusion_matrix.sum()
col_sums = confusion_matrix.sum(0)
precision = (
confusion_matrix.diagonal()[col_sums.nonzero()] /
col_sums[col_sums.nonzero()]).sum() / len(col_sums[col_sums.nonzero()])
row_sums = confusion_matrix.sum(1)
recall = (
confusion_matrix.diagonal()[row_sums.nonzero()] /
row_sums[row_sums.nonzero()]).sum() / len(row_sums[row_sums.nonzero()])
#print labels
#print confusion_matrix
return precision, recall, accuracy
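# Editor's note: evaluate() macro-averages precision and recall over the classes
# that actually occur (all-zero rows/columns of the confusion matrix are skipped);
# e.g. a purely diagonal confusion matrix yields precision = recall = accuracy = 1.0.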
if __name__ == '__main__':
rd = OldHamshahriReader(config.corpora_root)
counter = Counter()
docs = []
normalizer = Normalizer()
stemmer = Stemmer()
for doc in rd.docs(count=config.documents_count):
doc['text'] = normalizer.normalize(doc['text'])
doc['words'] = [stemmer.stem(word) for word in word_tokenize(doc['text'])]
counter.update([doc['cat']])
docs.append(doc)
print counter
all_words = []
for doc in docs:
all_words.extend(doc['words'])
dist = nltk.FreqDist(word for word in all_words)
word_features = dimension_reduction(all_words, dist)
print len(word_features) / float(len(all_words)) * 100.0
features_set = [(doc_features(doc, word_features), doc['cat']) for doc in docs]
#train_set, test_set = features_set[:len(docs)/2], features_set[len(docs)/2:len(docs)]
print len(features_set), len(docs)
train_set, test_set, unlabeled_set = features_set[:500], features_set[500:1000], features_set[1000:2000]
classifier = None
if config.classifier_type == 'NaiveBayes':
classifier = nltk.NaiveBayesClassifier.train(train_set)
if config.semi_supervised:
loops = 0
probs = []
while loops < 5:
most_promisings = 100 * [(0, 0, None, None)]
i = 0
for (fs, l) in unlabeled_set:
res = classifier.prob_classify(fs)
(p, l) = max([(res.prob(l), l) for l in res.samples()])
if p > most_promisings[0][0]:
heapq.heappushpop(most_promisings, (p, i, fs, l))
i += 1
train_set.extend([(fs, l) for (p, i, fs, l) in most_promisings])
indices = [i for (p, i, fs, l) in most_promisings]
indices.sort(reverse=True)
for i in indices:
del(unlabeled_set[i])
classifier = nltk.NaiveBayesClassifier.train(train_set)
print [p for (p, i, fs, l) in most_promisings]
print loops
loops += 1
elif config.classifier_type == 'DecisionTree':
classifier = nltk.classify.DecisionTreeClassifier.train(train_set, entropy_cutoff=0, support_cutoff=0)
elif config.classifier_type == 'SVC':
classifier = nltk.classify.scikitlearn.SklearnClassifier(SVC(), sparse=False).train(train_set)
elif config.classifier_type == 'LinearSVC':
classifier = nltk.classify.scikitlearn.SklearnClassifier(LinearSVC(), sparse=False).train(train_set)
else:
raise ValueError, "Classifier type unknown."
precision, recall, accuracy = evaluate(classifier, test_set, counter.keys())
print "Precision: %g\tRecall: %g\tAccuracy: %g" % (precision, recall, accuracy)
#classifier.show_most_informative_features(10)
| mit |
TenninYan/Perceptron | ch5/mnist.py | 2 | 1745 | #coding:utf-8
import numpy as np
from mlp import MultiLayerPerceptron
from sklearn.datasets import fetch_mldata
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import confusion_matrix, classification_report
"""
MNISTの手書き数字データの認識
scikit-learnのインストールが必要
http://scikit-learn.org/
"""
if __name__ == "__main__":
# MNISTの数字データ
# 70000サンプル, 28x28ピクセル
# カレントディレクトリ(.)にmnistデータがない場合は
# Webから自動的にダウンロードされる(時間がかかる)
mnist = fetch_mldata('MNIST original', data_home=".")
# 訓練データを作成
X = mnist.data
y = mnist.target
# ピクセルの値を0.0-1.0に正規化
X = X.astype(np.float64)
X /= X.max()
# 多層パーセプトロンを構築
mlp = MultiLayerPerceptron(28*28, 100, 10, act1="tanh", act2="softmax")
# 訓練データとテストデータに分解
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
# 教師信号の数字を1-of-K表記に変換
labels_train = LabelBinarizer().fit_transform(y_train)
labels_test = LabelBinarizer().fit_transform(y_test)
# 訓練データを用いてニューラルネットの重みを学習
mlp.fit(X_train, labels_train, learning_rate=0.01, epochs=100000)
# テストデータを用いて予測精度を計算
predictions = []
for i in range(X_test.shape[0]):
o = mlp.predict(X_test[i])
predictions.append(np.argmax(o))
print confusion_matrix(y_test, predictions)
print classification_report(y_test, predictions)
| mit |
ChristopherHogan/numpy | numpy/fft/fftpack.py | 72 | 45497 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
a = asarray(a).astype(complex)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
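# --- Illustrative sketch, not part of the original module: a small helper
# (hypothetical name) demonstrating the cropping/padding behaviour of `n` and
# the 1/sqrt(n) scaling applied by norm="ortho", as described in the
# docstring above. The sample length of 8 is an arbitrary choice.
def _example_fft_padding_and_norm():
    import numpy as np
    x = np.arange(8.0)
    cropped = fft(x, n=4)     # input cropped to its first 4 samples
    padded = fft(x, n=16)     # input zero-padded to 16 samples
    assert cropped.shape == (4,) and padded.shape == (16,)
    # "ortho" rescales the default (unscaled) forward transform by 1/sqrt(n).
    assert np.allclose(fft(x, norm="ortho"), fft(x) / np.sqrt(len(x)))
    return cropped, padded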
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
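# --- Illustrative sketch, not part of the original module: verifies the
# round-trip identity ``ifft(fft(a)) == a`` quoted above, with the default
# 1/n scaling on the inverse and with "ortho" scaling on both sides. The
# random test vector is an arbitrary choice.
def _example_ifft_round_trip():
    import numpy as np
    rng = np.random.RandomState(0)
    a = rng.rand(16) + 1j * rng.rand(16)
    assert np.allclose(ifft(fft(a)), a)
    assert np.allclose(ifft(fft(a, norm="ortho"), norm="ortho"), a)
    return a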
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
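# --- Illustrative sketch, not part of the original module: for real input of
# length n, `rfft` returns only the n//2 + 1 non-negative frequency bins and
# agrees with the corresponding leading bins of the full complex `fft`.
def _example_rfft_half_spectrum():
    import numpy as np
    rng = np.random.RandomState(1)
    a = rng.rand(10)
    half = rfft(a)
    assert half.shape == (len(a) // 2 + 1,)
    assert np.allclose(half, fft(a)[:len(a) // 2 + 1])
    return half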
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
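# --- Illustrative sketch, not part of the original module: the Fourier
# interpolation trick from the Notes, ``a_resamp = irfft(rfft(a), m)``, used
# to resample one period of a cosine from 16 to 32 points. Because rfft is
# unscaled and irfft divides by the *output* length, the amplitude shrinks by
# n/m and is restored here by multiplying by m/n.
def _example_irfft_resample():
    import numpy as np
    n, m = 16, 32
    a = np.cos(2 * np.pi * np.arange(n) / float(n))
    a_resamp = irfft(rfft(a), m) * (float(m) / n)
    assert a_resamp.shape == (m,)
    assert np.allclose(a_resamp[::2], a)   # every other point hits an original sample
    return a_resamp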
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
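# --- Illustrative sketch, not part of the original module: the hfft/ihfft
# pair on the half-sequence from the docstring example. The real spectrum
# produced by `hfft` matches the full FFT of the Hermitian-symmetric signal
# [1, 2, 3, 4, 3, 2], and `ihfft` recovers the half-sequence.
def _example_hfft_pair():
    import numpy as np
    a = np.array([1.0, 2.0, 3.0, 4.0])          # first half of the signal
    spectrum = hfft(a)                           # real, default length 2*(4-1) = 6
    assert np.allclose(spectrum, [15., -4., 0., -1., 0., -4.])
    assert np.allclose(ihfft(spectrum), a)       # round trip for the even default length
    return spectrum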
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
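# --- Illustrative sketch, not part of the original module: how the helper
# above resolves defaults. With `s` and `axes` both omitted every axis is
# transformed at its own length; with ``invreal=1`` (the irfftn case) the
# last-axis length is rebuilt as ``2*(m - 1)`` from the half-spectrum size m.
def _example_cook_nd_args():
    import numpy as np
    a = np.zeros((4, 6))
    assert _cook_nd_args(a) == ([4, 6], [-2, -1])
    assert _cook_nd_args(a, invreal=1) == ([4, 10], [-2, -1])
    return _cook_nd_args(a)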
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
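# --- Illustrative sketch, not part of the original module: the n-dimensional
# transforms defined below are just repeated one-dimensional transforms, with
# `_raw_fftnd` walking the axes list in reverse (last axis first). The small
# random array is an arbitrary choice.
def _example_fftn_is_repeated_fft():
    import numpy as np
    rng = np.random.RandomState(2)
    a = rng.rand(3, 4)
    manual = fft(fft(a, axis=-1), axis=-2)   # transform the last axis, then the first
    assert np.allclose(fftn(a), manual)
    return manual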
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
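# --- Illustrative sketch, not part of the original module: `rfftn` keeps only
# the non-negative frequencies of the last axis, so the output shape replaces
# the last-axis length by ``s[-1]//2 + 1`` and matches the retained bins of
# the full complex transform.
def _example_rfftn_shape():
    import numpy as np
    rng = np.random.RandomState(3)
    a = rng.rand(4, 6)
    out = rfftn(a)
    assert out.shape == (4, 6 // 2 + 1)
    assert np.allclose(out, fftn(a)[:, :6 // 2 + 1])
    return out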
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes, norm)
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
        of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
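# --- Illustrative sketch, not part of the original module: the round trip
# quoted in the docstring, ``irfftn(rfftn(a), a.shape) == a``. The original
# shape must be passed because an odd last-axis length cannot be recovered
# from the half-spectrum alone.
def _example_irfftn_round_trip():
    import numpy as np
    rng = np.random.RandomState(4)
    a = rng.rand(3, 5)                 # odd last axis
    assert np.allclose(irfftn(rfftn(a), a.shape), a)
    return a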
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes, norm)
| bsd-3-clause |
numenta/nupic | examples/tm/tm_high_order.py | 15 | 17726 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple tutorial that shows some features of the Temporal Memory.
The following program has the purpose of presenting some
basic properties of the Temporal Memory, in particular when it comes
to how it handles high-order sequences.
"""
import numpy as np
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from nupic.bindings.algorithms import TemporalMemory as TM
def accuracy(current, predicted):
"""
Computes the accuracy of the TM at time-step t based on the prediction
at time-step t-1 and the current active columns at time-step t.
@param current (array) binary vector containing current active columns
@param predicted (array) binary vector containing predicted active columns
@return acc (float) prediction accuracy of the TM at time-step t
"""
accuracy = 0
if np.count_nonzero(predicted) > 0:
accuracy = float(np.dot(current, predicted))/float(np.count_nonzero(predicted))
return accuracy
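# --- Illustrative sketch, not part of the original tutorial: the metric above
# is the fraction of *predicted* columns that turn out to be active. With two
# of the three predicted columns active the score is 2/3. The tiny vectors
# are arbitrary choices for the example.
def exampleAccuracy():
  current = np.array([1, 1, 0, 1, 0])
  predicted = np.array([1, 1, 1, 0, 0])
  score = accuracy(current, predicted)
  assert abs(score - 2.0 / 3.0) < 1e-9
  return score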
def corruptVector(v1, noiseLevel, numActiveCols):
"""
Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits.
@param v1 (array) binary vector whose copy will be corrupted
@param noiseLevel (float) amount of noise to be applied on the new vector
@param numActiveCols (int) number of sparse columns that represent an input
@return v2 (array) corrupted binary vector
"""
size = len(v1)
v2 = np.zeros(size, dtype="uint32")
bitsToSwap = int(noiseLevel * numActiveCols)
# Copy the contents of v1 into v2
for i in range(size):
v2[i] = v1[i]
for _ in range(bitsToSwap):
i = random.randrange(size)
if v2[i] == 1:
v2[i] = 0
else:
v2[i] = 1
return v2
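# --- Illustrative sketch, not part of the original tutorial: corruptVector
# inverts up to noiseLevel * numActiveCols randomly chosen bits (a bit picked
# twice is flipped back, so fewer positions may differ). The vector size and
# noise level below are arbitrary choices for the example.
def exampleCorruptVector():
  v = np.zeros(50, dtype="uint32")
  v[0:10] = 1                        # an SDR with 10 active columns
  noisy = corruptVector(v, 0.3, 10)  # invert up to 3 bits
  assert np.count_nonzero(v != noisy) <= 3
  return noisy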
def showPredictions():
"""
Shows predictions of the TM when presented with the characters A, B, C, D, X, and
Y without any contextual information, that is, not embedded within a sequence.
"""
for k in range(6):
tm.reset()
print "--- " + "ABCDXY"[k] + " ---"
tm.compute(set(seqT[k][:].nonzero()[0].tolist()), learn=False)
activeColumnsIndices = [tm.columnForCell(i) for i in tm.getActiveCells()]
predictedColumnIndices = [tm.columnForCell(i) for i in tm.getPredictiveCells()]
currentColumns = [1 if i in activeColumnsIndices else 0 for i in range(tm.numberOfColumns())]
predictedColumns = [1 if i in predictedColumnIndices else 0 for i in range(tm.numberOfColumns())]
print("Active cols: " + str(np.nonzero(currentColumns)[0]))
print("Predicted cols: " + str(np.nonzero(predictedColumns)[0]))
print ""
def trainTM(sequence, timeSteps, noiseLevel):
"""
Trains the TM with given sequence for a given number of time steps and level of input
corruption
@param sequence (array) array whose rows are the input characters
@param timeSteps (int) number of time steps in which the TM will be presented with sequence
@param noiseLevel (float) amount of noise to be applied on the characters in the sequence
"""
currentColumns = np.zeros(tm.numberOfColumns(), dtype="uint32")
predictedColumns = np.zeros(tm.numberOfColumns(), dtype="uint32")
ts = 0
for t in range(timeSteps):
tm.reset()
for k in range(4):
v = corruptVector(sequence[k][:], noiseLevel, sparseCols)
tm.compute(set(v[:].nonzero()[0].tolist()), learn=True)
activeColumnsIndices = [tm.columnForCell(i) for i in tm.getActiveCells()]
predictedColumnIndices = [tm.columnForCell(i) for i in tm.getPredictiveCells()]
currentColumns = [1 if i in activeColumnsIndices else 0 for i in range(tm.numberOfColumns())]
acc = accuracy(currentColumns, predictedColumns)
x.append(ts)
y.append(acc)
ts += 1
predictedColumns = [1 if i in predictedColumnIndices else 0 for i in range(tm.numberOfColumns())]
uintType = "uint32"
random.seed(1)
tm = TM(columnDimensions = (2048,),
cellsPerColumn=8,
initialPermanence=0.21,
connectedPermanence=0.3,
minThreshold=15,
maxNewSynapseCount=40,
permanenceIncrement=0.1,
permanenceDecrement=0.1,
activationThreshold=15,
predictedSegmentDecrement=0.01,
)
sparsity = 0.02
sparseCols = int(tm.numberOfColumns() * sparsity)
# We will create a sparse representation of characters A, B, C, D, X, and Y.
# In this particular example we manually construct them, but usually you would
# use the spatial pooler to build these.
seq1 = np.zeros((4, tm.numberOfColumns()), dtype="uint32")
seq1[0, 0:sparseCols] = 1 # Input SDR representing "A"
seq1[1, sparseCols:2*sparseCols] = 1 # Input SDR representing "B"
seq1[2, 2*sparseCols:3*sparseCols] = 1 # Input SDR representing "C"
seq1[3, 3*sparseCols:4*sparseCols] = 1 # Input SDR representing "D"
seq2 = np.zeros((4, tm.numberOfColumns()), dtype="uint32")
seq2[0, 4*sparseCols:5*sparseCols] = 1 # Input SDR representing "X"
seq2[1, sparseCols:2*sparseCols] = 1 # Input SDR representing "B"
seq2[2, 2*sparseCols:3*sparseCols] = 1 # Input SDR representing "C"
seq2[3, 5*sparseCols:6*sparseCols] = 1 # Input SDR representing "Y"
seqT = np.zeros((6, tm.numberOfColumns()), dtype="uint32")
seqT[0, 0:sparseCols] = 1 # Input SDR representing "A"
seqT[1, sparseCols:2*sparseCols] = 1 # Input SDR representing "B"
seqT[2, 2*sparseCols:3*sparseCols] = 1 # Input SDR representing "C"
seqT[3, 3*sparseCols:4*sparseCols] = 1 # Input SDR representing "D"
seqT[4, 4*sparseCols:5*sparseCols] = 1 # Input SDR representing "X"
seqT[5, 5*sparseCols:6*sparseCols] = 1 # Input SDR representing "Y"
# PART 1. Feed the TM with sequence "ABCD". The TM will eventually learn
# the pattern and its prediction accuracy will go to 1.0 (except in-between sequences
# where the TM doesn't output any prediction)
print ""
print "-"*50
print "Part 1. We present the sequence ABCD to the TM. The TM will eventually"
print "will learn the sequence and predict the upcoming characters. This can be"
print "measured by the prediction accuracy in Fig 1."
print "N.B. In-between sequences the accuracy is 0.0 as the TM does not output"
print "any prediction."
print "-"*50
print ""
x = []
y = []
trainTM(seq1, timeSteps=10, noiseLevel=0.0)
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 1: TM learns sequence ABCD")
plt.savefig("figure_1")
plt.close()
print ""
print "-"*50
print "Once the TM has learned the sequence ABCD, we will present the individual"
print "characters to the TM to know its prediction. The TM outputs the columns"
print "that become active upon the presentation of a particular character as well"
print "as the columns predicted in the next time step. Here, you should see that"
print "A predicts B, B predicts C, C predicts D, and D does not output any"
print "prediction."
print "N.B. Here, we are presenting individual characters, that is, a character"
print "deprived of context in a sequence. There is no prediction for characters"
print "X and Y as we have not presented them to the TM in any sequence."
print "-"*50
print ""
showPredictions()
print ""
print "-"*50
print "Part 2. We now present the sequence XBCY to the TM. As expected, the accuracy will"
print "drop until the TM learns the new sequence (Fig 2). What will be the prediction of"
print "the TM if presented with the sequence BC? This would depend on what character"
print "anteceding B. This is an important feature of high-order sequences."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=10, noiseLevel=0.0)
# In this figure you can see how the TM starts making good predictions for particular
# characters (spikes in the plot). Then, it will get half of its predictions right, which
# correspond to the times at which it is presented with character C. After some time, it
# will learn correctly the sequence XBCY, and predict its characters accordingly.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 2: TM learns new sequence XBCY")
plt.savefig("figure_2")
plt.close()
print ""
print "-"*50
print "We will present again each of the characters individually to the TM, that is,"
print "not within any of the two sequences. When presented with character A the TM"
print "predicts B, B predicts C, but this time C outputs a simultaneous prediction of"
print "both D and Y. In order to disambiguate, the TM would require to know if the"
print "preceding characters were AB or XB. When presented with character X the TM"
print "predicts B, whereas Y and D yield no prediction."
print "-"*50
print ""
showPredictions()
# PART 3. Now we will present noisy inputs to the TM. We will add noise to the sequence XBCY
# by corrupting 30% of its bits. We would like to see how the TM responds in the presence of
# noise and how it recovers from it.
print ""
print "-"*50
print "Part 3. We will add noise to the sequence XBCY by corrupting 30% of the bits in the vectors"
print "encoding each character. We would expect to see a decrease in prediction accuracy as the"
print "TM is unable to learn the random noise in the input (Fig 3). However, this decrease is not"
print "significant."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=50, noiseLevel=0.3)
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 3: Accuracy in TM with 30% noise in input")
plt.savefig("figure_3")
plt.close()
print ""
print "-"*50
print "Let's have a look again at the output of the TM when presented with noisy"
print "input (30%). Here, the noise is low that the TM is not affected by it,"
print "which would be the case if we saw 'noisy' columns being predicted when"
print "presented with individual characters. Thus, we could say that the TM exhibits"
print "resilience to noise in its input."
print "-"*50
print ""
showPredictions()
# Let's corrupt the sequence more by adding 50% of noise to each of its characters.
# Here, we would expect to see some 'noisy' columns being predicted when the TM is
# presented with the individual characters.
print ""
print "-"*50
print "Now, we will set noise to be 50% of the bits in the characters X, B, C, and Y."
print "As expected, the accuracy will decrease (Fig 5) and 'noisy' columns will be"
print "predicted by the TM."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=50, noiseLevel=0.5)
print ""
print "-"*50
print "Let's have a look again at the output of the TM when presented with noisy"
print "input. The prediction of some characters (eg. X) now includes columns that"
print "are not related to any other character. This is because the TM tried to learn"
print "the noise in the input patterns."
print "-"*50
print ""
showPredictions()
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 4: Accuracy in TM with 50% noise in input")
plt.savefig("figure_4")
plt.close()
# Will the TM be able to forget the 'noisy' columns learned in the previous step?
# We will present the TM with the original sequence XBCY so it forgets the 'noisy'
# columns.
x = []
y = []
trainTM(seq2, timeSteps=10, noiseLevel=0.0)
print ""
print "-"*50
print "After presenting the original sequence XBCY to the TM, we would expect to see"
print "the predicted noisy columns from the previous step disappear. We will verify that"
print "by presenting the individual characters to the TM."
print "-"*50
print ""
showPredictions()
# We can see how the prediction accuracy goes back to 1.0 (as before, not in-between sequences)
# when the TM 'forgets' the noisy columns.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 5: TM forgets noise in sequence XBCY when noise is over")
plt.savefig("figure_5")
plt.close()
# Let's corrupt the sequence even more and add 90% of noise to each of its characters.
# Here, we would expect to see even more of a decrease in accuracy along with more 'noisy'
# columns being predicted.
print ""
print "-"*50
print "We will add more noise to the characters in the sequence XBCY. This time we will"
print "corrupt 90% of its contents. As expected, the accuracy will decrease (Fig 6) and"
print "'noisy' columns will be predicted by the TM."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=50, noiseLevel=0.9)
print ""
print "-"*50
print "Next, we will have a look at the output of the TM when presented with the"
print "individual characters of the sequence. As before, we see 'noisy' predicted"
print "columns emerging as a result of the TM trying to learn the noise."
print "-"*50
print ""
showPredictions()
# In this figure we can observe how the prediction accuracy is affected by the presence
# of noise in the input. However, the accuracy does not drop dramatically even with 90%
# of noise, which implies that the TM exhibits some resilience to noise in its input
# and does not easily forget a well-learned, real pattern.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 6: Accuracy with 90% noise in input")
plt.savefig("figure_6")
plt.close()
# Let's present the original sequence to the TM in order to make it forget the noisy columns.
# After this, the TM will accurately predict the sequence again, and its predictions will
# not include 'noisy' columns anymore.
x = []
y = []
trainTM(seq2, timeSteps=25, noiseLevel=0.0)
# We will observe how the prediction accuracy gets back to 1.0 (not in-between sequences)
# as the TM is presented with the original sequence.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 7: When noise is suspended, accuracy is restored")
plt.savefig("figure_7")
plt.close()
# The TM restores its prediction accuracy and it can be seen when presented with the individual characters.
# There are no noisy columns being predicted.
print ""
print "-"*50
print "After presenting noisy input to the TM, we present the original sequence in"
print "order to make it re-learn XBCY. We verify that this was achieved by presenting"
print "the TM with the individual characters and observing its output. Again, we can"
print "see that the 'noisy' columns are not being predicted anymore, and that the"
print "prediction accuracy goes back to 1.0 when the sequence is presented (Fig 7)."
print "-"*50
print ""
showPredictions()
# PART 4. Now, we will present both sequences ABCD and XBCY randomly to the TM.
# For this purpose we will start with a new TM.
# What would be the output of the TM when presented with character D if it has
# been exposed to sequences ABCD and XBCY occurring randomly one after the other?
# If one quarter of the time the TM sees the sequence ABCDABCD, another quarter the
# TM sees ABCDXBCY, another quarter it sees XBCYXBCY, and the last quarter it sees
# XBCYABCD, then the TM would exhibit simultaneous predictions for characters D, Y
# and C.
print ""
print "-"*50
print "Part 4. We will present both sequences ABCD and XBCY randomly to the TM."
print "Here, we might observe simultaneous predictions occurring when the TM is"
print "presented with characters D, Y, and C. For this purpose we will use a"
print "blank TM"
print "NB. Here we will not reset the TM after presenting each sequence with the"
print "purpose of making the TM learn different predictions for D and Y."
print "-"*50
print ""
tm = TM(columnDimensions = (2048,),
cellsPerColumn=8,
initialPermanence=0.21,
connectedPermanence=0.3,
minThreshold=15,
maxNewSynapseCount=40,
permanenceIncrement=0.1,
permanenceDecrement=0.1,
activationThreshold=15,
predictedSegmentDecrement=0.01,
)
for t in range(75):
rnd = random.randrange(2)
for k in range(4):
if rnd == 0:
tm.compute(set(seq1[k][:].nonzero()[0].tolist()), learn=True)
else:
tm.compute(set(seq2[k][:].nonzero()[0].tolist()), learn=True)
print ""
print "-"*50
print "We now have a look at the output of the TM when presented with the individual"
print "characters A, B, C, D, X, and Y. We might observe simultaneous predictions when"
print "presented with character D (predicting A and X), character Y (predicting A and X),"
print "and when presented with character C (predicting D and Y)."
print "N.B. Due to the stochasticity of this script, we might not observe simultaneous"
print "predictions in *all* the aforementioned characters."
print "-"*50
print ""
showPredictions()
print ""
print "-*"*25
print "Scroll up to see the development of this simple"
print "tutorial. Also open the source file to see more"
print "comments regarding each part of the script."
print "All images generated by this script will be saved"
print "in your current working directory."
print "-*"*25
print ""
| agpl-3.0 |
apdjustino/DRCOG_Urbansim | synthicity/urbansim/lcmnl.py | 1 | 2370 | from synthicity.urbansim import interaction, mnl
import numpy as np, pandas as pd
import time
GPU = 0
EMTOL = 1e-02
MAXITER = 10000
def prep_cm_data(cmdata,numclasses):
numobs, numvars = cmdata.shape
newcmdata = np.zeros((numobs*numclasses,numvars*(numclasses-1)))
for i in range(cmdata.shape[0]):
for j in range(1,numclasses):
newcmdata[i*numclasses+j,(j-1)*numvars:j*numvars] = cmdata[i]
return newcmdata
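# --- Illustrative sketch, not part of the original module: prep_cm_data
# replicates each observation once per latent class and writes its variables
# into the block for that class; class 0 is the reference, so its row stays
# zero. For 1 observation, 2 variables and 3 classes the result is (3, 4).
def _example_prep_cm_data():
  expanded = prep_cm_data(np.array([[1.0, 2.0]]), 3)
  expected = np.array([[0., 0., 0., 0.],
                       [1., 2., 0., 0.],
                       [0., 0., 1., 2.]])
  assert np.allclose(expanded, expected)
  return expanded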
def lcmnl_estimate(cmdata,numclasses,csdata,numalts,chosen,maxiter=MAXITER,emtol=EMTOL,\
skipprep=False,csbeta=None,cmbeta=None):
loglik = -999999
if csbeta is None: csbeta = [np.random.rand(csdata.shape[1]) for i in range(numclasses)]
if not skipprep: cmdata = prep_cm_data(cmdata,numclasses)
if cmbeta is None: cmbeta = np.zeros(cmdata.shape[1])
for i in range(maxiter):
print "Running iteration %d" % (i+1)
print time.ctime()
# EXPECTATION
print "Running class membership model"
cmprobs = mnl.mnl_simulate(cmdata,cmbeta,numclasses,GPU=GPU,returnprobs=1)
csprobs = []
for cno in range(numclasses):
tmp = mnl.mnl_simulate(csdata,csbeta[cno],numalts,GPU=GPU,returnprobs=1)
tmp = np.sum(tmp*chosen,axis=1) # keep only chosen probs
csprobs.append(np.reshape(tmp,(-1,1)))
csprobs = np.concatenate(csprobs,axis=1)
h = csprobs * cmprobs
oldloglik = loglik
loglik = np.sum(np.log(np.sum(h,axis=1)))
print "current csbeta", csbeta
print "current cmbeta", cmbeta
print "current loglik", loglik, i+1, "\n\n"
if abs(loglik-oldloglik) < emtol: break
wts = h / np.reshape(np.sum(h,axis=1),(-1,1))
# MAXIMIZATION
for cno in range(numclasses):
print "Estimating class specific model for class %d" % (cno+1)
t1 = time.time()
weights=np.reshape(wts[:,cno],(-1,1))
print weights.shape
fit, results = mnl.mnl_estimate(csdata,chosen,numalts,GPU=GPU,weights=weights,beta=csbeta[cno])
print "Finished in %fs" % (time.time()-t1)
csbeta[cno] = zip(*results)[0]
print "Estimating class membership model"
t1 = time.time()
fit, results = mnl.mnl_estimate(cmdata,None,numclasses,GPU=GPU,weights=wts,lcgrad=True, \
beta=cmbeta,coeffrange=(-1000,1000))
print "Finished in %fs" % (time.time()-t1)
cmbeta = zip(*results)[0]
| agpl-3.0 |
chrisdjscott/Atoman | atoman/plotting/plotDialog.py | 1 | 6491 |
"""
Plot dialog.
@author: Chris Scott
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import traceback
import logging
from PySide2 import QtGui, QtCore, QtWidgets
import matplotlib
from six.moves import zip
matplotlib.use("Qt5Agg")
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib import rc
from ..visutils.utilities import iconPath
################################################################################
class PlotDialog(QtWidgets.QDialog):
"""
Dialog for displaying a plot.
"""
def __init__(self, parent, mainWindow, dlgTitle, plotType, plotArgs, plotKwargs, settingsDict={}):
super(PlotDialog, self).__init__(parent)
self.parent = parent
self.mainWindow = mainWindow
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.setWindowTitle("Plotter - %s" % dlgTitle)
self.setWindowIcon(QtGui.QIcon(iconPath("oxygen/office-chart-bar.png")))
# settings
settings = self.mainWindow.preferences.matplotlibForm
figWidth = settings.figWidth
figHeight = settings.figHeight
figDpi = settings.figDpi
showGrid = settings.showGrid
fontsize = settings.fontsize
tickFontsize = settings.tickFontsize
legendFontsize = settings.legendFontsize
# set dimension of dialog
self.dlgWidth = figWidth * figDpi + 20
self.dlgHeight = figHeight * figDpi + 100
self.resize(self.dlgWidth, self.dlgHeight)
# make size fixed
self.setMinimumSize(self.dlgWidth, self.dlgHeight)
self.setMaximumSize(self.dlgWidth, self.dlgHeight)
# plot widget
self.mainWidget = QtWidgets.QWidget(self)
# setup figure
self.fig = Figure((figWidth, figHeight), dpi=figDpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.mainWidget)
# axes
self.axes = self.fig.add_subplot(111)
# toolbar
self.mplToolbar = NavigationToolbar(self.canvas, self.mainWidget)
# get plot method
if hasattr(self.axes, plotType):
plotMethod = getattr(self.axes, plotType)
try:
# plot
plotMethod(*plotArgs, **plotKwargs)
# store plot args for later use
self.plotArgs = plotArgs
except Exception as e:
self.mainWindow.displayError("Matplotlib plot failed with following error:\n\n%s" % "".join(traceback.format_exception(*sys.exc_info())))
self.close()
else:
self.mainWindow.displayError("Unrecognised matplotlib plot method:\n\n%s" % plotType)
# show grid
if showGrid:
self.axes.grid(True)
# text size
for tick in self.axes.xaxis.get_major_ticks():
tick.label1.set_fontsize(tickFontsize)
for tick in self.axes.yaxis.get_major_ticks():
tick.label1.set_fontsize(tickFontsize)
# axis labels (if specified!)
if "xlabel" in settingsDict:
self.axes.set_xlabel(settingsDict["xlabel"], fontsize=fontsize)
if "ylabel" in settingsDict:
self.axes.set_ylabel(settingsDict["ylabel"], fontsize=fontsize)
if "title" in settingsDict:
self.axes.set_title(settingsDict["title"], fontsize=fontsize)
# tight layout
self.fig.tight_layout()
# draw canvas
self.canvas.draw()
# write to file button
writeDataButton = QtWidgets.QPushButton("Write csv")
writeDataButton.setAutoDefault(False)
writeDataButton.setDefault(False)
writeDataButton.clicked.connect(self.writeData)
writeDataButton.setToolTip("Write csv file containing plot data")
# close button
closeButton = QtWidgets.QPushButton("Close")
closeButton.clicked.connect(self.accept)
# button box
buttonBox = QtWidgets.QDialogButtonBox()
buttonBox.addButton(writeDataButton, QtWidgets.QDialogButtonBox.ActionRole)
buttonBox.addButton(closeButton, QtWidgets.QDialogButtonBox.AcceptRole)
# layout
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mplToolbar)
vbox.addWidget(buttonBox)
self.mainWidget.setLayout(vbox)
def writeData(self):
"""
Write data to csv file
"""
logger = logging.getLogger(__name__)
if hasattr(self, "plotArgs"):
showError = False
plotArgs = list(self.plotArgs)
if len(plotArgs) == 2:
try:
l0 = len(plotArgs[0])
l1 = len(plotArgs[1])
except TypeError:
showError = True
else:
if l0 == l1:
                        filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', '.')[0]
if len(filename):
logger.debug("Writing data to csv file: '%s'", filename)
#TODO: use numpy method?
                            with open(filename, "w") as f:
                                for x, y in zip(plotArgs[0], plotArgs[1]):
                                    f.write("%r, %r\n" % (x, y))
else:
showError = True
else:
showError = True
if showError:
self.mainWindow.displayError("Write data not implemented for this type of plot!\n\nFor histograms try selecting 'show as fraction'")
def closeEvent(self, event):
"""
Override close event.
"""
self.done(0)
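# Illustrative usage sketch (not part of the original module): PlotDialog is
# presumably constructed from within the Atoman GUI; the parent/mainWindow
# objects, data values and settings keys below are assumptions chosen only to
# show the call shape.
#
#     dlg = PlotDialog(parent, mainWindow, "RDF", "plot",
#                      ([0.0, 1.0, 2.0], [0.0, 0.5, 0.2]), {},
#                      settingsDict={"xlabel": "r", "ylabel": "g(r)"})
#     dlg.show()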
| mit |
greenoaktree/MissionPlanner | Lib/site-packages/scipy/signal/filter_design.py | 53 | 63381 | """Filter design.
"""
import types
import warnings
import numpy
from numpy import atleast_1d, poly, polyval, roots, real, asarray, allclose, \
resize, pi, absolute, logspace, r_, sqrt, tan, log10, arctan, arcsinh, \
cos, exp, cosh, arccosh, ceil, conjugate, zeros, sinh
from numpy import mintypecode
from scipy import special, optimize
from scipy.misc import comb
class BadCoefficients(UserWarning):
pass
abs = absolute
def findfreqs(num, den, N):
ep = atleast_1d(roots(den))+0j
tz = atleast_1d(roots(num))+0j
if len(ep) == 0:
ep = atleast_1d(-1000)+0j
ez = r_['-1',numpy.compress(ep.imag >=0, ep,axis=-1), numpy.compress((abs(tz) < 1e5) & (tz.imag >=0),tz,axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3*abs(ez.real + integ)+1.5*ez.imag))+0.5)
lfreq = numpy.around(numpy.log10(0.1*numpy.min(abs(real(ez+integ))+2*ez.imag))-0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the numerator (b) and denominator (a) of a filter compute its
frequency response::
b[0]*(jw)**(nb-1) + b[1]*(jw)**(nb-2) + ... + b[nb-1]
H(w) = -------------------------------------------------------
a[0]*(jw)**(na-1) + a[1]*(jw)**(na-2) + ... + a[na-1]
Parameters
----------
b : ndarray
Numerator of a linear filter.
a : ndarray
Denominator of a linear filter.
worN : {None, int}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
        integer, then compute at that many frequencies. Otherwise, compute the
response at frequencies given in worN.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
        response inside `freqs`.
Returns
-------
w : ndarray
The frequencies at which h was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results, because it plots the real part of the complex transfer
function, not the magnitude.
"""
if worN is None:
w = findfreqs(b,a,200)
elif isinstance(worN, types.IntType):
N = worN
w = findfreqs(b,a,N)
else:
w = worN
w = atleast_1d(w)
s = 1j*w
h = polyval(b, s) / polyval(a, s)
if not plot is None:
plot(w, h)
return w, h
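# Illustrative sketch (not part of the original scipy source): evaluating the
# analog response of a first-order low-pass H(s) = 1 / (s + 1) with freqs.
# The helper below is only an example and is never called at import time.
def _freqs_lowpass_example():
    b, a = [1.0], [1.0, 1.0]                      # numerator, denominator of 1/(s+1)
    w, h = freqs(b, a, worN=logspace(-2, 2, 50))  # evaluate on a log-spaced grid
    return w, abs(h)                              # magnitude ~1 at low w, rolls off above w=1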
def freqz(b, a=1, worN=None, whole=0, plot=None):
"""
Compute the frequency response of a digital filter.
Given the numerator ``b`` and denominator ``a`` of a digital filter compute
its frequency response::
jw -jw -jmw
jw B(e) b[0] + b[1]e + .... + b[m]e
H(e) = ---- = ------------------------------------
jw -jw -jnw
A(e) a[0] + a[1]e + .... + a[n]e
Parameters
----------
b : ndarray
numerator of a linear filter
a : ndarray
denominator of a linear filter
worN : {None, int}, optional
If None, then compute at 512 frequencies around the unit circle.
        If a single integer, then compute at that many frequencies.
        Otherwise, compute the response at frequencies given in worN.
whole : bool, optional
        Normally, frequencies are computed from 0 to pi (upper half of the
        unit circle).  If `whole` is True, compute frequencies from 0 to 2*pi.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The frequencies at which h was computed.
h : ndarray
The frequency response.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results, because it plots the real part of the complex transfer
function, not the magnitude.
Examples
--------
>>> b = firwin(80, 0.5, window=('kaiser', 8))
    >>> w, h = freqz(b)
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.title('Digital filter frequency response')
    >>> ax1 = fig.add_subplot(111)
    >>> plt.semilogy(w, np.abs(h), 'b')
    >>> plt.ylabel('Amplitude (dB)', color='b')
    >>> plt.xlabel('Frequency (rad/sample)')
    >>> plt.grid()
    >>> plt.legend()
    >>> ax2 = ax1.twinx()
    >>> angles = np.unwrap(np.angle(h))
    >>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.show()
"""
b, a = map(atleast_1d, (b,a))
if whole:
lastpoint = 2*pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.arange(0,lastpoint,lastpoint/N)
elif isinstance(worN, types.IntType):
N = worN
w = numpy.arange(0,lastpoint,lastpoint/N)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j*w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if not plot is None:
plot(w, h)
return w, h
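# Illustrative sketch (not part of the original scipy source): the digital
# response of a 4-tap moving-average FIR filter via freqz.
def _freqz_moving_average_example():
    b = [0.25, 0.25, 0.25, 0.25]   # simple 4-point moving average
    w, h = freqz(b)                # 512 frequencies on [0, pi)
    return w, abs(h)               # magnitude response; DC gain is 1.0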
def tf2zpk(b, a):
"""Return zero, pole, gain (z,p,k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : ndarray
Numerator polynomial.
a : ndarray
Denominator polynomial.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
If some values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
"""
b,a = normalize(b,a)
b = (b+0.0) / a[0]
a = (a+0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""Return polynomial transfer function representation from zeros
and poles
Parameters
----------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial.
a : ndarray
Denominator polynomial.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1]+1), temp.dtype.char)
if len(k) == 1:
k = [k[0]]*z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
return b, a
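# Illustrative sketch (not part of the original scipy source): tf2zpk and
# zpk2tf are inverses of one another (up to normalization by the leading
# denominator coefficient).
def _tf_zpk_roundtrip_example():
    z, p, k = tf2zpk([1.0, 2.0], [1.0, 3.0, 2.0])  # H(s) = (s+2)/((s+1)(s+2))
    b, a = zpk2tf(z, p, k)                          # back to polynomial form
    return b, a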
def normalize(b, a):
"""Normalize polynomial representation of a transfer function.
If values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
"""
b,a = map(atleast_1d,(b,a))
if len(a.shape) != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if len(b.shape) > 2:
raise ValueError("Numerator polynomial must be rank-1 or rank-2 array.")
if len(b.shape) == 1:
b = asarray([b],b.dtype.char)
while a[0] == 0.0 and len(a) > 1:
a = a[1:]
outb = b * (1.0) / a[0]
outa = a * (1.0) / a[0]
if allclose(outb[:,0], 0, rtol=1e-14):
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
while allclose(outb[:,0], 0, rtol=1e-14) and (outb.shape[-1] > 1):
outb = outb[:,1:]
if outb.shape[0] == 1:
outb = outb[0]
return outb, outa
def lp2lp(b, a, wo=1.0):
"""Return a low-pass filter with cutoff frequency `wo`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d,n))
pwo = pow(wo,numpy.arange(M-1,-1,-1))
start1 = max((n-d,0))
start2 = max((d-n,0))
b = b * pwo[start1]/pwo[start2:]
a = a * pwo[start1]/pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""Return a high-pass filter with cutoff frequency `wo`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo,numpy.arange(max((d,n))))
else:
pwo = numpy.ones(max((d,n)),b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b,(d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a,(n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""Return a band-pass filter with center frequency `wo` and bandwidth `bw`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a,b))
ma = max([N,D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
wosq = wo*wo
for j in range(Np+1):
val = 0.0
for i in range(0,N+1):
for k in range(0,i+1):
if ma-i+2*k == j:
val += comb(i,k)*b[N-i]*(wosq)**(i-k) / bw**i
bprime[Np-j] = val
for j in range(Dp+1):
val = 0.0
for i in range(0,D+1):
for k in range(0,i+1):
if ma-i+2*k == j:
val += comb(i,k)*a[D-i]*(wosq)**(i-k) / bw**i
aprime[Dp-j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1, bw=1):
"""Return a band-stop filter with center frequency `wo` and bandwidth `bw`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a,b))
M = max([N,D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
wosq = wo*wo
for j in range(Np+1):
val = 0.0
for i in range(0,N+1):
for k in range(0,M-i+1):
if i+2*k == j:
val += comb(M-i,k)*b[N-i]*(wosq)**(M-i-k) * bw**i
bprime[Np-j] = val
for j in range(Dp+1):
val = 0.0
for i in range(0,D+1):
for k in range(0,M-i+1):
if i+2*k == j:
val += comb(M-i,k)*a[D-i]*(wosq)**(M-i-k) * bw**i
aprime[Dp-j] = val
return normalize(bprime, aprime)
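# Illustrative sketch (not part of the original scipy source): transforming the
# unity-cutoff low-pass prototype 1/(s+1) into band-pass and band-stop filters
# centered on wo = 2 rad/s with a bandwidth of 0.5 rad/s.
def _lp_transform_example():
    b, a = [1.0], [1.0, 1.0]            # low-pass prototype 1/(s+1)
    bp = lp2bp(b, a, wo=2.0, bw=0.5)    # (b, a) of the band-pass filter
    bs = lp2bs(b, a, wo=2.0, bw=0.5)    # (b, a) of the band-stop filter
    return bp, bs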
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog filter using the bilinear transform.
    The bilinear transform substitutes ``(z-1) / (z+1)`` for ``s``.
"""
fs =float(fs)
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N,D])
Np = M
Dp = M
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
for j in range(Np+1):
val = 0.0
for i in range(N+1):
for k in range(i+1):
for l in range(M-i+1):
if k+l == j:
val += comb(i,k)*comb(M-i,l)*b[N-i]*pow(2*fs,i)*(-1)**k
bprime[j] = real(val)
for j in range(Dp+1):
val = 0.0
for i in range(D+1):
for k in range(i+1):
for l in range(M-i+1):
if k+l == j:
val += comb(i,k)*comb(M-i,l)*a[D-i]*pow(2*fs,i)*(-1)**k
aprime[j] = real(val)
return normalize(bprime, aprime)
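# Illustrative sketch (not part of the original scipy source): discretizing the
# analog low-pass H(s) = 1/(s+1) with the bilinear transform at a 10 Hz
# sampling rate.
def _bilinear_example():
    bz, az = bilinear([1.0], [1.0, 1.0], fs=10.0)
    return bz, az   # numerator, denominator of the digital equivalent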
def iirdesign(wp, ws, gpass, gstop, analog=0, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba') or pole-zero ('zpk') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
ftype : str, optional
The type of IIR filter to design:
- elliptic : 'ellip'
- Butterworth : 'butter',
- Chebyshev I : 'cheby1',
- Chebyshev II: 'cheby2',
- Bessel : 'bessel'
output : ['ba', 'zpk'], optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
Returns
-------
b, a :
Numerator and denominator of the IIR filter. Only returned if
``output='ba'``.
    z, p, k :
        Zeros, poles, and gain of the IIR filter. Only returned if
        ``output='zpk'``.
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError("%s does not have order selection use iirfilter function." % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2*(len(wp)-1)
band_type +=1
if wp[0] >= ws[0]:
band_type += 1
btype = {1:'lowpass', 2:'highpass', 3:'bandstop', 4:'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, ftype=ftype, output=output)
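# Illustrative sketch (not part of the original scipy source): a minimal
# iirdesign call for a digital low-pass with passband edge 0.2 and stopband
# edge 0.3 (normalized to Nyquist), 1 dB passband ripple and 40 dB stopband
# attenuation, using a Butterworth design.
def _iirdesign_example():
    b, a = iirdesign(wp=0.2, ws=0.3, gpass=1, gstop=40, ftype='butter')
    return b, a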
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=0, ftype='butter', output='ba'):
"""IIR digital and analog filter design given order and critical points.
Design an Nth order lowpass digital or analog filter and return the filter
coefficients in (B,A) (numerator, denominator) or (Z,P,K) form.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
rp : float, optional
For Chebyshev and elliptic filters provides the maximum ripple
in the passband.
rs : float, optional
For chebyshev and elliptic filters provides the minimum attenuation in
the stop band.
btype : str, optional
The type of filter (lowpass, highpass, bandpass, bandstop).
Default is bandpass.
analog : int, optional
Non-zero to return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- elliptic : 'ellip'
- Butterworth : 'butter',
- Chebyshev I : 'cheby1',
- Chebyshev II: 'cheby2',
- Bessel : 'bessel'
output : ['ba', 'zpk'], optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
See Also
--------
    buttord, cheb1ord, cheb2ord, ellipord
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("%s is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("%s is not a valid basic iir filter." % ftype)
if output not in ['ba', 'zpk']:
raise ValueError("%s is not a valid output form." % output)
#pre-warp frequencies for digital filter design
if not analog:
fs = 2.0
warped = 2*fs*tan(pi*Wn/fs)
else:
warped = Wn
# convert to low-pass prototype
if btype in ['lowpass', 'highpass']:
wo = warped
else:
bw = warped[1] - warped[0]
wo = sqrt(warped[0]*warped[1])
# Get analog lowpass prototype
if typefunc in [buttap, besselap]:
z, p, k = typefunc(N)
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband atteunatuion (rs) must be provided to design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
else: # Elliptic filters
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an elliptic filter.")
z, p, k = typefunc(N, rp, rs)
b, a = zpk2tf(z,p,k)
# transform to lowpass, bandpass, highpass, or bandstop
if btype == 'lowpass':
b, a = lp2lp(b,a,wo=wo)
elif btype == 'highpass':
b, a = lp2hp(b,a,wo=wo)
elif btype == 'bandpass':
b, a = lp2bp(b,a,wo=wo,bw=bw)
else: # 'bandstop'
b, a = lp2bs(b,a,wo=wo,bw=bw)
# Find discrete equivalent if necessary
if not analog:
b, a = bilinear(b, a, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return tf2zpk(b,a)
else:
return b,a
def butter(N, Wn, btype='low', analog=0, output='ba'):
"""Butterworth digital and analog filter design.
Design an Nth order lowpass digital or analog Butterworth filter and return
the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
buttord.
"""
return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='butter')
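# Illustrative sketch (not part of the original scipy source): a 4th order
# digital Butterworth low-pass with cutoff at 0.2 of Nyquist, in both
# polynomial and zero-pole-gain form.
def _butter_example():
    b, a = butter(4, 0.2)                   # default btype='low', output='ba'
    z, p, k = butter(4, 0.2, output='zpk')  # the same filter as zeros/poles/gain
    return (b, a), (z, p, k)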
def cheby1(N, rp, Wn, btype='low', analog=0, output='ba'):
"""Chebyshev type I digital and analog filter design.
Design an Nth order lowpass digital or analog Chebyshev type I filter and
return the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
cheb1ord.
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog, output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=0, output='ba'):
"""Chebyshev type I digital and analog filter design.
Design an Nth order lowpass digital or analog Chebyshev type I filter and
return the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
cheb2ord.
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=0, output='ba'):
"""Elliptic (Cauer) digital and analog filter design.
Design an Nth order lowpass digital or analog elliptic filter and return
the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
ellipord.
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog, output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=0, output='ba'):
"""Bessel digital and analog filter design.
Design an Nth order lowpass digital or analog Bessel filter and return the
filter coefficients in (B,A) or (Z,P,K) form.
"""
return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='bessel')
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp :
Edge of passband `passb`.
ind : int
Index specifying which `passb` edge to vary (0 or 1).
passb : array_like
Two element sequence of fixed passband edges.
stopb : array_like
Two element sequence of fixed stopband edges.
gstop : float
Amount of attenuation in stopband in dB.
gpass : float
Amount of ripple in the passband in dB.
type : ['butter', 'cheby', 'ellip']
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = stopb*(passbC[0]-passbC[1]) / (stopb**2 - passbC[0]*passbC[1])
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
n = (log10((GSTOP-1.0)/(GPASS-1.0)) / (2*log10(nat)))
elif type == 'cheby':
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
n = arccosh(sqrt((GSTOP-1.0)/(GPASS-1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10**(0.1*gstop)
GPASS = 10**(0.1*gpass)
arg1 = sqrt( (GPASS-1.0) / (GSTOP-1.0) )
arg0 = 1.0 / nat
d0 = special.ellipk([arg0**2, 1-arg0**2])
d1 = special.ellipk([arg1**2, 1-arg1**2])
n = (d0[0]*d1[1] / (d0[1]*d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=0):
"""Butterworth filter order selection.
Return the order of the lowest order digital Butterworth filter that loses
no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
filter_type +=1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies
if not analog:
passb = tan(wp*pi/2.0)
stopb = tan(ws*pi/2.0)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'butter'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil( log10((GSTOP-1.0)/(GPASS-1.0)) / (2*log10(nat))))
    # Find the Butterworth natural frequency W0 (i.e. the "3dB frequency")
# to give exactly gstop at nat. W0 will be between 1 and nat
try:
W0 = nat / ( ( 10**(0.1*abs(gstop))-1)**(1.0/(2.0*ord)))
except ZeroDivisionError:
W0 = nat
print "Warning, order is zero...check input parametegstop."
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0*passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2,float)
WN[0] = ((passb[1] - passb[0]) + sqrt((passb[1] - passb[0])**2 + \
4*W0**2 * passb[0] * passb[1])) / (2*W0)
WN[1] = ((passb[1] - passb[0]) - sqrt((passb[1] - passb[0])**2 + \
4*W0**2 * passb[0] * passb[1])) / (2*W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0],float)
WN = -W0 * (passb[1]-passb[0]) / 2.0 + sqrt(W0**2 / 4.0 * \
(passb[1]-passb[0])**2 + \
passb[0]*passb[1])
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0/pi)*arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
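# Illustrative sketch (not part of the original scipy source): buttord picks the
# minimum order and natural frequency meeting the spec, which are then handed
# straight to butter.
def _buttord_then_butter_example():
    ord, wn = buttord(wp=0.2, ws=0.3, gpass=1, gstop=40)
    b, a = butter(ord, wn)
    return ord, wn, (b, a)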
def cheb1ord(wp, ws, gpass, gstop, analog=0):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital Chebyshev Type I filter that
loses no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
    # Pre-warp frequencies
if not analog:
passb = tan(pi*wp/2.)
stopb = tan(pi*ws/2.)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'cheby'), disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'cheby'), disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP-1.0) / (GPASS-1.0))) / arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0/pi)*arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=0):
"""Chebyshev type II filter order selection.
    Return the order of the lowest order digital Chebyshev Type II filter that
    loses no more than `gpass` dB in the passband and has at least `gstop` dB
    attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
    # Pre-warp frequencies
if not analog:
passb = tan(pi*wp/2.0)
stopb = tan(pi*ws/2.0)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'cheby'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP-1.0) / (GPASS-1.0))) / arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0/ord * arccosh(sqrt((GSTOP-1.0)/(GPASS-1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2,float)
nat[0] = new_freq / 2.0 * (passb[0]-passb[1]) + \
sqrt(new_freq**2 * (passb[1]-passb[0])**2 / 4.0 + \
passb[1] * passb[0])
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2,float)
nat[0] = 1.0/(2.0*new_freq) * (passb[0] - passb[1]) + \
sqrt((passb[1]-passb[0])**2 / (4.0*new_freq**2) + \
passb[1] * passb[0])
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0/pi)*arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=0):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital elliptic filter that loses no
more than gpass dB in the passband and has at least gstop dB attenuation in
the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
    -------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
        `ellip` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
    # Pre-warp frequencies
if analog:
passb = wp*1.0
stopb = ws*1.0
else:
passb = tan(wp*pi/2.0)
stopb = tan(ws*pi/2.0)
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'ellip'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*gstop)
GPASS = 10**(0.1*gpass)
arg1 = sqrt( (GPASS-1.0) / (GSTOP-1.0) )
arg0 = 1.0 / nat
d0 = special.ellipk([arg0**2, 1-arg0**2])
d1 = special.ellipk([arg1**2, 1-arg1**2])
ord = int(ceil(d0[0]*d1[1] / (d0[1]*d1[0])))
if not analog:
wn = arctan(passb)*2.0/pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) zero, pole, gain for analog prototype of an Nth
order Butterworth filter."""
z = []
n = numpy.arange(1,N+1)
p = numpy.exp(1j*(2*n-1)/(2.0*N)*pi)*1j
k = 1
return z, p, k
def cheb1ap(N, rp):
"""Return (z,p,k) zero, pole, gain for Nth order Chebyshev type I lowpass
analog filter prototype with `rp` decibels of ripple in the passband.
"""
z = []
eps = numpy.sqrt(10**(0.1*rp)-1.0)
n = numpy.arange(1,N+1)
mu = 1.0/N * numpy.log((1.0+numpy.sqrt(1+eps*eps)) / eps)
theta = pi/2.0 * (2*n-1.0)/N
p = -numpy.sinh(mu)*numpy.sin(theta) + 1j*numpy.cosh(mu)*numpy.cos(theta)
k = numpy.prod(-p,axis=0).real
if N % 2 == 0:
k = k / sqrt((1+eps*eps))
return z, p, k
def cheb2ap(N, rs):
"""Return (z,p,k) zero, pole, gain for Nth order Chebyshev type II lowpass
    analog filter prototype with `rs` decibels of attenuation in the stopband.
"""
de = 1.0/sqrt(10**(0.1*rs)-1)
mu = arcsinh(1.0/de)/N
if N % 2:
m = N - 1
n = numpy.concatenate((numpy.arange(1,N-1,2),numpy.arange(N+2,2*N,2)))
else:
m = N
n = numpy.arange(1,2*N,2)
z = conjugate(1j / cos(n*pi/(2.0*N)))
p = exp(1j*(pi*numpy.arange(1,2*N,2)/(2.0*N) + pi/2.0))
p = sinh(mu) * p.real + 1j*cosh(mu)*p.imag
p = 1.0 / p
k = (numpy.prod(-p,axis=0)/numpy.prod(-z,axis=0)).real
return z, p, k
EPSILON = 2e-16
def vratio(u, ineps, mp):
[s,c,d,phi] = special.ellipj(u,mp)
ret = abs(ineps - s/c)
return ret
def kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m,1-m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) zeros, poles, and gain of an Nth order normalized
prototype elliptic analog lowpass filter with `rp` decibels of ripple in
the passband and a stopband `rs` decibels down.
References
----------
Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5
and 12.
"""
if N == 1:
p = -sqrt(1.0/(10**(0.1*rp)-1.0))
k = -p
z = []
return z, p, k
eps = numpy.sqrt(10**(0.1*rp)-1)
ck1 = eps / numpy.sqrt(10**(0.1*rs)-1)
ck1p = numpy.sqrt(1-ck1*ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs specifications.")
wp = 1
val = special.ellipk([ck1*ck1,ck1p*ck1p])
if abs(1-ck1p*ck1p) < EPSILON:
krat = 0
else:
krat = N*val[0] / val[1]
m = optimize.fmin(kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
ws = wp / sqrt(m)
m1 = 1-m
j = numpy.arange(1-N%2,N,2)
jj = len(j)
[s,c,d,phi] = special.ellipj(j*capk/N,m*numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s,axis=-1)
z = 1.0 / (sqrt(m)*snew)
z = 1j*z
z = numpy.concatenate((z,conjugate(z)))
r = optimize.fmin(vratio, special.ellipk(m), args=(1./eps, ck1p*ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N*val[0])
[sv,cv,dv,phi] = special.ellipj(v0,1-m)
p = -(c*d*sv*cv + 1j*s*dv) / (1-(d*sv)**2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON*numpy.sqrt(numpy.sum(p*numpy.conjugate(p),axis=0).real), p,axis=-1)
p = numpy.concatenate((p,conjugate(newp)))
else:
p = numpy.concatenate((p,conjugate(p)))
k = (numpy.prod(-p,axis=0) / numpy.prod(-z,axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1+eps*eps))
return z, p, k
def besselap(N):
"""Return (z,p,k) zero, pole, gain for analog prototype of an Nth order
Bessel filter."""
z = []
k = 1
if N == 0:
p = [];
elif N == 1:
p = [-1]
elif N == 2:
p = [-.8660254037844386467637229+.4999999999999999999999996*1j,
-.8660254037844386467637229-.4999999999999999999999996*1j]
elif N == 3:
p = [-.9416000265332067855971980,
-.7456403858480766441810907-.7113666249728352680992154*1j,
-.7456403858480766441810907+.7113666249728352680992154*1j]
elif N == 4:
p = [-.6572111716718829545787781-.8301614350048733772399715*1j,
-.6572111716718829545787788+.8301614350048733772399715*1j,
-.9047587967882449459642637-.2709187330038746636700923*1j,
-.9047587967882449459642624+.2709187330038746636700926*1j]
elif N == 5:
p = [-.9264420773877602247196260,
-.8515536193688395541722677-.4427174639443327209850002*1j,
-.8515536193688395541722677+.4427174639443327209850002*1j,
-.5905759446119191779319432-.9072067564574549539291747*1j,
-.5905759446119191779319432+.9072067564574549539291747*1j]
elif N == 6:
p = [-.9093906830472271808050953-.1856964396793046769246397*1j,
-.9093906830472271808050953+.1856964396793046769246397*1j,
-.7996541858328288520243325-.5621717346937317988594118*1j,
-.7996541858328288520243325+.5621717346937317988594118*1j,
-.5385526816693109683073792-.9616876881954277199245657*1j,
-.5385526816693109683073792+.9616876881954277199245657*1j]
elif N == 7:
p = [-.9194871556490290014311619,
-.8800029341523374639772340-.3216652762307739398381830*1j,
-.8800029341523374639772340+.3216652762307739398381830*1j,
-.7527355434093214462291616-.6504696305522550699212995*1j,
-.7527355434093214462291616+.6504696305522550699212995*1j,
-.4966917256672316755024763-1.002508508454420401230220*1j,
-.4966917256672316755024763+1.002508508454420401230220*1j]
elif N == 8:
p = [-.9096831546652910216327629-.1412437976671422927888150*1j,
-.9096831546652910216327629+.1412437976671422927888150*1j,
-.8473250802359334320103023-.4259017538272934994996429*1j,
-.8473250802359334320103023+.4259017538272934994996429*1j,
-.7111381808485399250796172-.7186517314108401705762571*1j,
-.7111381808485399250796172+.7186517314108401705762571*1j,
-.4621740412532122027072175-1.034388681126901058116589*1j,
-.4621740412532122027072175+1.034388681126901058116589*1j]
elif N == 9:
p = [-.9154957797499037686769223,
-.8911217017079759323183848-.2526580934582164192308115*1j,
-.8911217017079759323183848+.2526580934582164192308115*1j,
-.8148021112269012975514135-.5085815689631499483745341*1j,
-.8148021112269012975514135+.5085815689631499483745341*1j,
-.6743622686854761980403401-.7730546212691183706919682*1j,
-.6743622686854761980403401+.7730546212691183706919682*1j,
-.4331415561553618854685942-1.060073670135929666774323*1j,
-.4331415561553618854685942+1.060073670135929666774323*1j]
elif N == 10:
p = [-.9091347320900502436826431-.1139583137335511169927714*1j,
-.9091347320900502436826431+.1139583137335511169927714*1j,
-.8688459641284764527921864-.3430008233766309973110589*1j,
-.8688459641284764527921864+.3430008233766309973110589*1j,
-.7837694413101441082655890-.5759147538499947070009852*1j,
-.7837694413101441082655890+.5759147538499947070009852*1j,
-.6417513866988316136190854-.8175836167191017226233947*1j,
-.6417513866988316136190854+.8175836167191017226233947*1j,
-.4083220732868861566219785-1.081274842819124562037210*1j,
-.4083220732868861566219785+1.081274842819124562037210*1j]
elif N == 11:
p = [-.9129067244518981934637318,
             -.8963656705721166099815744-.2080480375071031919692341*1j,
-.8963656705721166099815744+.2080480375071031919692341*1j,
-.8453044014712962954184557-.4178696917801248292797448*1j,
-.8453044014712962954184557+.4178696917801248292797448*1j,
-.7546938934722303128102142-.6319150050721846494520941*1j,
-.7546938934722303128102142+.6319150050721846494520941*1j,
-.6126871554915194054182909-.8547813893314764631518509*1j,
-.6126871554915194054182909+.8547813893314764631518509*1j,
-.3868149510055090879155425-1.099117466763120928733632*1j,
-.3868149510055090879155425+1.099117466763120928733632*1j]
elif N == 12:
p = [-.9084478234140682638817772-95506365213450398415258360.0e-27*1j,
-.9084478234140682638817772+95506365213450398415258360.0e-27*1j,
-.8802534342016826507901575-.2871779503524226723615457*1j,
-.8802534342016826507901575+.2871779503524226723615457*1j,
-.8217296939939077285792834-.4810212115100676440620548*1j,
-.8217296939939077285792834+.4810212115100676440620548*1j,
-.7276681615395159454547013-.6792961178764694160048987*1j,
-.7276681615395159454547013+.6792961178764694160048987*1j,
-.5866369321861477207528215-.8863772751320727026622149*1j,
-.5866369321861477207528215+.8863772751320727026622149*1j,
-.3679640085526312839425808-1.114373575641546257595657*1j,
-.3679640085526312839425808+1.114373575641546257595657*1j]
elif N == 13:
p = [-.9110914665984182781070663,
-.8991314665475196220910718-.1768342956161043620980863*1j,
-.8991314665475196220910718+.1768342956161043620980863*1j,
-.8625094198260548711573628-.3547413731172988997754038*1j,
-.8625094198260548711573628+.3547413731172988997754038*1j,
-.7987460692470972510394686-.5350752120696801938272504*1j,
-.7987460692470972510394686+.5350752120696801938272504*1j,
-.7026234675721275653944062-.7199611890171304131266374*1j,
-.7026234675721275653944062+.7199611890171304131266374*1j,
-.5631559842430199266325818-.9135900338325109684927731*1j,
-.5631559842430199266325818+.9135900338325109684927731*1j,
-.3512792323389821669401925-1.127591548317705678613239*1j,
-.3512792323389821669401925+1.127591548317705678613239*1j]
elif N == 14:
p = [-.9077932138396487614720659-82196399419401501888968130.0e-27*1j,
-.9077932138396487614720659+82196399419401501888968130.0e-27*1j,
-.8869506674916445312089167-.2470079178765333183201435*1j,
-.8869506674916445312089167+.2470079178765333183201435*1j,
-.8441199160909851197897667-.4131653825102692595237260*1j,
-.8441199160909851197897667+.4131653825102692595237260*1j,
-.7766591387063623897344648-.5819170677377608590492434*1j,
-.7766591387063623897344648+.5819170677377608590492434*1j,
-.6794256425119233117869491-.7552857305042033418417492*1j,
-.6794256425119233117869491+.7552857305042033418417492*1j,
-.5418766775112297376541293-.9373043683516919569183099*1j,
-.5418766775112297376541293+.9373043683516919569183099*1j,
-.3363868224902037330610040-1.139172297839859991370924*1j,
-.3363868224902037330610040+1.139172297839859991370924*1j]
elif N == 15:
p = [-.9097482363849064167228581,
-.9006981694176978324932918-.1537681197278439351298882*1j,
-.9006981694176978324932918+.1537681197278439351298882*1j,
-.8731264620834984978337843-.3082352470564267657715883*1j,
-.8731264620834984978337843+.3082352470564267657715883*1j,
-.8256631452587146506294553-.4642348752734325631275134*1j,
-.8256631452587146506294553+.4642348752734325631275134*1j,
-.7556027168970728127850416-.6229396358758267198938604*1j,
-.7556027168970728127850416+.6229396358758267198938604*1j,
-.6579196593110998676999362-.7862895503722515897065645*1j,
-.6579196593110998676999362+.7862895503722515897065645*1j,
-.5224954069658330616875186-.9581787261092526478889345*1j,
-.5224954069658330616875186+.9581787261092526478889345*1j,
-.3229963059766444287113517-1.149416154583629539665297*1j,
-.3229963059766444287113517+1.149416154583629539665297*1j]
elif N == 16:
p = [-.9072099595087001356491337-72142113041117326028823950.0e-27*1j,
-.9072099595087001356491337+72142113041117326028823950.0e-27*1j,
-.8911723070323647674780132-.2167089659900576449410059*1j,
-.8911723070323647674780132+.2167089659900576449410059*1j,
-.8584264231521330481755780-.3621697271802065647661080*1j,
-.8584264231521330481755780+.3621697271802065647661080*1j,
-.8074790293236003885306146-.5092933751171800179676218*1j,
-.8074790293236003885306146+.5092933751171800179676218*1j,
-.7356166304713115980927279-.6591950877860393745845254*1j,
-.7356166304713115980927279+.6591950877860393745845254*1j,
-.6379502514039066715773828-.8137453537108761895522580*1j,
-.6379502514039066715773828+.8137453537108761895522580*1j,
-.5047606444424766743309967-.9767137477799090692947061*1j,
-.5047606444424766743309967+.9767137477799090692947061*1j,
-.3108782755645387813283867-1.158552841199330479412225*1j,
-.3108782755645387813283867+1.158552841199330479412225*1j]
elif N == 17:
p = [-.9087141161336397432860029,
-.9016273850787285964692844-.1360267995173024591237303*1j,
-.9016273850787285964692844+.1360267995173024591237303*1j,
-.8801100704438627158492165-.2725347156478803885651973*1j,
-.8801100704438627158492165+.2725347156478803885651973*1j,
-.8433414495836129204455491-.4100759282910021624185986*1j,
-.8433414495836129204455491+.4100759282910021624185986*1j,
-.7897644147799708220288138-.5493724405281088674296232*1j,
-.7897644147799708220288138+.5493724405281088674296232*1j,
-.7166893842372349049842743-.6914936286393609433305754*1j,
-.7166893842372349049842743+.6914936286393609433305754*1j,
-.6193710717342144521602448-.8382497252826992979368621*1j,
-.6193710717342144521602448+.8382497252826992979368621*1j,
-.4884629337672704194973683-.9932971956316781632345466*1j,
-.4884629337672704194973683+.9932971956316781632345466*1j,
-.2998489459990082015466971-1.166761272925668786676672*1j,
-.2998489459990082015466971+1.166761272925668786676672*1j]
elif N == 18:
p = [-.9067004324162775554189031-64279241063930693839360680.0e-27*1j,
-.9067004324162775554189031+64279241063930693839360680.0e-27*1j,
-.8939764278132455733032155-.1930374640894758606940586*1j,
-.8939764278132455733032155+.1930374640894758606940586*1j,
-.8681095503628830078317207-.3224204925163257604931634*1j,
-.8681095503628830078317207+.3224204925163257604931634*1j,
-.8281885016242836608829018-.4529385697815916950149364*1j,
-.8281885016242836608829018+.4529385697815916950149364*1j,
-.7726285030739558780127746-.5852778162086640620016316*1j,
-.7726285030739558780127746+.5852778162086640620016316*1j,
-.6987821445005273020051878-.7204696509726630531663123*1j,
-.6987821445005273020051878+.7204696509726630531663123*1j,
-.6020482668090644386627299-.8602708961893664447167418*1j,
-.6020482668090644386627299+.8602708961893664447167418*1j,
-.4734268069916151511140032-1.008234300314801077034158*1j,
-.4734268069916151511140032+1.008234300314801077034158*1j,
-.2897592029880489845789953-1.174183010600059128532230*1j,
-.2897592029880489845789953+1.174183010600059128532230*1j]
elif N == 19:
p = [-.9078934217899404528985092,
-.9021937639390660668922536-.1219568381872026517578164*1j,
-.9021937639390660668922536+.1219568381872026517578164*1j,
-.8849290585034385274001112-.2442590757549818229026280*1j,
-.8849290585034385274001112+.2442590757549818229026280*1j,
-.8555768765618421591093993-.3672925896399872304734923*1j,
-.8555768765618421591093993+.3672925896399872304734923*1j,
-.8131725551578197705476160-.4915365035562459055630005*1j,
-.8131725551578197705476160+.4915365035562459055630005*1j,
-.7561260971541629355231897-.6176483917970178919174173*1j,
-.7561260971541629355231897+.6176483917970178919174173*1j,
-.6818424412912442033411634-.7466272357947761283262338*1j,
-.6818424412912442033411634+.7466272357947761283262338*1j,
-.5858613321217832644813602-.8801817131014566284786759*1j,
-.5858613321217832644813602+.8801817131014566284786759*1j,
-.4595043449730988600785456-1.021768776912671221830298*1j,
-.4595043449730988600785456+1.021768776912671221830298*1j,
-.2804866851439370027628724-1.180931628453291873626003*1j,
-.2804866851439370027628724+1.180931628453291873626003*1j]
elif N == 20:
p = [-.9062570115576771146523497-57961780277849516990208850.0e-27*1j,
-.9062570115576771146523497+57961780277849516990208850.0e-27*1j,
-.8959150941925768608568248-.1740317175918705058595844*1j,
-.8959150941925768608568248+.1740317175918705058595844*1j,
-.8749560316673332850673214-.2905559296567908031706902*1j,
-.8749560316673332850673214+.2905559296567908031706902*1j,
-.8427907479956670633544106-.4078917326291934082132821*1j,
-.8427907479956670633544106+.4078917326291934082132821*1j,
-.7984251191290606875799876-.5264942388817132427317659*1j,
-.7984251191290606875799876+.5264942388817132427317659*1j,
-.7402780309646768991232610-.6469975237605228320268752*1j,
-.7402780309646768991232610+.6469975237605228320268752*1j,
-.6658120544829934193890626-.7703721701100763015154510*1j,
-.6658120544829934193890626+.7703721701100763015154510*1j,
-.5707026806915714094398061-.8982829066468255593407161*1j,
-.5707026806915714094398061+.8982829066468255593407161*1j,
-.4465700698205149555701841-1.034097702560842962315411*1j,
-.4465700698205149555701841+1.034097702560842962315411*1j,
-.2719299580251652601727704-1.187099379810885886139638*1j,
-.2719299580251652601727704+1.187099379810885886139638*1j]
elif N == 21:
p = [-.9072262653142957028884077,
-.9025428073192696303995083-.1105252572789856480992275*1j,
-.9025428073192696303995083+.1105252572789856480992275*1j,
-.8883808106664449854431605-.2213069215084350419975358*1j,
-.8883808106664449854431605+.2213069215084350419975358*1j,
-.8643915813643204553970169-.3326258512522187083009453*1j,
-.8643915813643204553970169+.3326258512522187083009453*1j,
-.8299435470674444100273463-.4448177739407956609694059*1j,
-.8299435470674444100273463+.4448177739407956609694059*1j,
-.7840287980408341576100581-.5583186348022854707564856*1j,
-.7840287980408341576100581+.5583186348022854707564856*1j,
-.7250839687106612822281339-.6737426063024382240549898*1j,
-.7250839687106612822281339+.6737426063024382240549898*1j,
-.6506315378609463397807996-.7920349342629491368548074*1j,
-.6506315378609463397807996+.7920349342629491368548074*1j,
-.5564766488918562465935297-.9148198405846724121600860*1j,
-.5564766488918562465935297+.9148198405846724121600860*1j,
-.4345168906815271799687308-1.045382255856986531461592*1j,
-.4345168906815271799687308+1.045382255856986531461592*1j,
-.2640041595834031147954813-1.192762031948052470183960*1j,
-.2640041595834031147954813+1.192762031948052470183960*1j]
elif N == 22:
p = [-.9058702269930872551848625-52774908289999045189007100.0e-27*1j,
-.9058702269930872551848625+52774908289999045189007100.0e-27*1j,
-.8972983138153530955952835-.1584351912289865608659759*1j,
-.8972983138153530955952835+.1584351912289865608659759*1j,
-.8799661455640176154025352-.2644363039201535049656450*1j,
-.8799661455640176154025352+.2644363039201535049656450*1j,
-.8534754036851687233084587-.3710389319482319823405321*1j,
-.8534754036851687233084587+.3710389319482319823405321*1j,
-.8171682088462720394344996-.4785619492202780899653575*1j,
-.8171682088462720394344996+.4785619492202780899653575*1j,
-.7700332930556816872932937-.5874255426351153211965601*1j,
-.7700332930556816872932937+.5874255426351153211965601*1j,
-.7105305456418785989070935-.6982266265924524000098548*1j,
-.7105305456418785989070935+.6982266265924524000098548*1j,
-.6362427683267827226840153-.8118875040246347267248508*1j,
-.6362427683267827226840153+.8118875040246347267248508*1j,
-.5430983056306302779658129-.9299947824439872998916657*1j,
-.5430983056306302779658129+.9299947824439872998916657*1j,
-.4232528745642628461715044-1.055755605227545931204656*1j,
-.4232528745642628461715044+1.055755605227545931204656*1j,
-.2566376987939318038016012-1.197982433555213008346532*1j,
-.2566376987939318038016012+1.197982433555213008346532*1j]
elif N == 23:
p = [-.9066732476324988168207439,
-.9027564979912504609412993-.1010534335314045013252480*1j,
-.9027564979912504609412993+.1010534335314045013252480*1j,
-.8909283242471251458653994-.2023024699381223418195228*1j,
-.8909283242471251458653994+.2023024699381223418195228*1j,
-.8709469395587416239596874-.3039581993950041588888925*1j,
-.8709469395587416239596874+.3039581993950041588888925*1j,
-.8423805948021127057054288-.4062657948237602726779246*1j,
-.8423805948021127057054288+.4062657948237602726779246*1j,
-.8045561642053176205623187-.5095305912227258268309528*1j,
-.8045561642053176205623187+.5095305912227258268309528*1j,
-.7564660146829880581478138-.6141594859476032127216463*1j,
-.7564660146829880581478138+.6141594859476032127216463*1j,
-.6965966033912705387505040-.7207341374753046970247055*1j,
-.6965966033912705387505040+.7207341374753046970247055*1j,
-.6225903228771341778273152-.8301558302812980678845563*1j,
-.6225903228771341778273152+.8301558302812980678845563*1j,
-.5304922463810191698502226-.9439760364018300083750242*1j,
-.5304922463810191698502226+.9439760364018300083750242*1j,
-.4126986617510148836149955-1.065328794475513585531053*1j,
-.4126986617510148836149955+1.065328794475513585531053*1j,
-.2497697202208956030229911-1.202813187870697831365338*1j,
-.2497697202208956030229911+1.202813187870697831365338*1j]
elif N == 24:
p = [-.9055312363372773709269407-48440066540478700874836350.0e-27*1j,
-.9055312363372773709269407+48440066540478700874836350.0e-27*1j,
-.8983105104397872954053307-.1454056133873610120105857*1j,
-.8983105104397872954053307+.1454056133873610120105857*1j,
-.8837358034555706623131950-.2426335234401383076544239*1j,
-.8837358034555706623131950+.2426335234401383076544239*1j,
-.8615278304016353651120610-.3403202112618624773397257*1j,
-.8615278304016353651120610+.3403202112618624773397257*1j,
-.8312326466813240652679563-.4386985933597305434577492*1j,
-.8312326466813240652679563+.4386985933597305434577492*1j,
-.7921695462343492518845446-.5380628490968016700338001*1j,
-.7921695462343492518845446+.5380628490968016700338001*1j,
-.7433392285088529449175873-.6388084216222567930378296*1j,
-.7433392285088529449175873+.6388084216222567930378296*1j,
-.6832565803536521302816011-.7415032695091650806797753*1j,
-.6832565803536521302816011+.7415032695091650806797753*1j,
-.6096221567378335562589532-.8470292433077202380020454*1j,
-.6096221567378335562589532+.8470292433077202380020454*1j,
-.5185914574820317343536707-.9569048385259054576937721*1j,
-.5185914574820317343536707+.9569048385259054576937721*1j,
-.4027853855197518014786978-1.074195196518674765143729*1j,
-.4027853855197518014786978+1.074195196518674765143729*1j,
-.2433481337524869675825448-1.207298683731972524975429*1j,
-.2433481337524869675825448+1.207298683731972524975429*1j]
elif N == 25:
p = [-.9062073871811708652496104,
-.9028833390228020537142561-93077131185102967450643820.0e-27*1j,
-.9028833390228020537142561+93077131185102967450643820.0e-27*1j,
-.8928551459883548836774529-.1863068969804300712287138*1j,
-.8928551459883548836774529+.1863068969804300712287138*1j,
-.8759497989677857803656239-.2798521321771408719327250*1j,
-.8759497989677857803656239+.2798521321771408719327250*1j,
-.8518616886554019782346493-.3738977875907595009446142*1j,
-.8518616886554019782346493+.3738977875907595009446142*1j,
-.8201226043936880253962552-.4686668574656966589020580*1j,
-.8201226043936880253962552+.4686668574656966589020580*1j,
-.7800496278186497225905443-.5644441210349710332887354*1j,
-.7800496278186497225905443+.5644441210349710332887354*1j,
-.7306549271849967721596735-.6616149647357748681460822*1j,
-.7306549271849967721596735+.6616149647357748681460822*1j,
-.6704827128029559528610523-.7607348858167839877987008*1j,
-.6704827128029559528610523+.7607348858167839877987008*1j,
-.5972898661335557242320528-.8626676330388028512598538*1j,
-.5972898661335557242320528+.8626676330388028512598538*1j,
-.5073362861078468845461362-.9689006305344868494672405*1j,
-.5073362861078468845461362+.9689006305344868494672405*1j,
-.3934529878191079606023847-1.082433927173831581956863*1j,
-.3934529878191079606023847+1.082433927173831581956863*1j,
-.2373280669322028974199184-1.211476658382565356579418*1j,
-.2373280669322028974199184+1.211476658382565356579418*1j]
else:
raise ValueError("Bessel Filter not supported for order %d" % N)
return z, p, k
filter_dict = {'butter': [buttap,buttord],
'butterworth' : [buttap,buttord],
'cauer' : [ellipap,ellipord],
'elliptic' : [ellipap,ellipord],
'ellip' : [ellipap,ellipord],
'bessel' : [besselap],
'cheby1' : [cheb1ap, cheb1ord],
'chebyshev1' : [cheb1ap, cheb1ord],
'chebyshevi' : [cheb1ap, cheb1ord],
'cheby2' : [cheb2ap, cheb2ord],
'chebyshev2' : [cheb2ap, cheb2ord],
'chebyshevii' : [cheb2ap, cheb2ord]
}
band_dict = {'band':'bandpass',
'bandpass':'bandpass',
'pass' : 'bandpass',
'bp':'bandpass',
'bs':'bandstop',
'bandstop':'bandstop',
'bands' : 'bandstop',
'stop' : 'bandstop',
'l' : 'lowpass',
'low': 'lowpass',
'lowpass' : 'lowpass',
'high' : 'highpass',
'highpass' : 'highpass',
'h' : 'highpass'
}
warnings.simplefilter("always", BadCoefficients)
| gpl-3.0 |
bukzor/sympy | doc/ext/docscrape_sphinx.py | 52 | 7983 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
    def _str_signature(self):
        # The signature is intentionally suppressed here; the original
        # rendering logic is kept below for reference but never reached.
        return ['']
        # if self['Signature']:
        #     return ['``%s``' % self['Signature']] + ['']
        # else:
        #     return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
## Lines that are commented out are used to make the
## autosummary:: table. Since SymPy does not use the
## autosummary:: functionality, it is easiest to just comment it
## out.
#autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
#if not self._obj or hasattr(self._obj, param):
# autosum += [" %s%s" % (prefix, param)]
#else:
others.append((param, param_type, desc))
#if autosum:
# out += ['.. autosummary::', ' :toctree:', '']
# out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
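# Usage sketch (the callable passed in is a placeholder, not something this
# module provides): get_doc_object inspects the object, wraps it in the
# matching Sphinx*Doc class, and str() renders the numpydoc-style reST.
#   doc = get_doc_object(some_callable)   # callable -> SphinxFunctionDoc
#   rst = str(doc)                        # via SphinxDocString.__str__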
| bsd-3-clause |
siutanwong/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
dylanGeng/BuildingMachineLearningSystemsWithPython | ch11/demo_mds.py | 25 | 3724 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
import numpy as np
from matplotlib import pylab
from mpl_toolkits.mplot3d import Axes3D
from sklearn import linear_model, manifold, decomposition, datasets
logistic = linear_model.LogisticRegression()
from utils import CHART_DIR
np.random.seed(3)
# all examples will have three classes in this file
colors = ['r', 'g', 'b']
markers = ['o', 6, '*']
def plot_demo_1():
X = np.c_[np.ones(5), 2 * np.ones(5), 10 * np.ones(5)].T
y = np.array([0, 1, 2])
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
mds = manifold.MDS(n_components=3)
Xtrans = mds.fit_transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on example data set in 3 dimensions")
ax.view_init(10, -15)
mds = manifold.MDS(n_components=2)
Xtrans = mds.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on example data set in 2 dimensions")
filename = "mds_demo_1.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_iris_mds():
iris = datasets.load_iris()
X = iris.data
y = iris.target
# MDS
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
mds = manifold.MDS(n_components=3)
Xtrans = mds.fit_transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on Iris data set in 3 dimensions")
ax.view_init(10, -15)
mds = manifold.MDS(n_components=2)
Xtrans = mds.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on Iris data set in 2 dimensions")
filename = "mds_demo_iris.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
# PCA
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
pca = decomposition.PCA(n_components=3)
Xtrans = pca.fit(X).transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("PCA on Iris data set in 3 dimensions")
ax.view_init(50, -35)
pca = decomposition.PCA(n_components=2)
Xtrans = pca.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("PCA on Iris data set in 2 dimensions")
filename = "pca_demo_iris.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_demo_1()
plot_iris_mds()
| mit |
OscarES/serpentinetracker | visualize.py | 1 | 14931 | # Copyright 2009, Stephen Molloy
#
# This file is part of Serpentine.
#
# Serpentine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Serpentine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Serpentine. If not, see <http://www.gnu.org/licenses/>.
#
from matplotlib.path import Path
from matplotlib.patches import PathPatch
'''Module to control the visualisation of beamline, beamrep and ...'''
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
class Visualize(object) :
def __init__(self) :
        '''Class to visualize numerical data as a function of beamline distance "S" '''
self.xlim = None
self.plotNames = []
self.axes = []
self.acb = None
# from serpentine
def PlotBPMReadings(self,so, formatstr='', classname='BPM'):
"""Plot the BPM readings from the most recent tracking operation"""
readings = so.GetBPMReadings(classname)
plt.plot(readings[0, :], readings[1, :], '-rx')
plt.plot(readings[0, :], readings[2, :], '-xb')
plt.ylabel('x/y / m')
self.PostPlot("PlotBPMReadings")
# From beamline
def PlotRparam(self, so, param1=1, param2=1):
"""Plots the value of the R matrix element R[param1,param2] vs S
Note that param1 and param2 use 'Matlab-style' indexing, rather
than 'Python-style'. i.e. they can be any integer between 1 and
6 inclusive."""
spos = np.zeros(len(so.beamline))
rparam = np.ones(len(so.beamline))
for ele in so.beamline:
spos[so.beamline.index(ele)] = ele.S
rparam[so.beamline.index(ele)] = ele.R[param1-1, param2-1]
plt.plot(spos, rparam, '-x')
plt.ylabel('R_'+str(param1)+str(param2))
self.PostPlot("PlotRParam")
# From beamline
def PlotMomProfile(self, so, formatstr='-x'):
"""Plots the momentum profile of the reference particle"""
spos, mom = so.beamline.GetMomProfile()
plt.plot(spos, mom, formatstr)
plt.ylabel('P / GeV/c')
self.PostPlot("PlotMomProfile")
# From beamline (get data from serpentine and beam line... )
def PlotEkProfile(self, so, formatstr='-x'):
"""Plots the kinetic energy profile of the reference particle"""
spos, kenergy = so.beamline.GetEkProfile(so.beam_in.restmass)
plt.plot(spos, kenergy, formatstr)
self.PostPlot("PlotEkProfile")
def PlotRFPhases(self, so):
"""Plots the RF phases of the AccCav objects in beamline."""
        plt.plot(so.beamline.GetRFPhases(), 'x')
self.PostPlot("PlotRFPhases")
# From beamline
def PlotTwiss(self,so, betax=True, betay=True, spline=False) :
"""PlotTwiss(self, betax=True, betay=True, spline=False)
Plot the twiss parameters.
if betax: plot Beta_x versus S
if betay: plot Beta_y versus S"""
twiss_dict = so.beamline.GetTwiss()
if betax :
xarr = np.array(twiss_dict['S'])
yarr = np.array(twiss_dict['betax'])
if spline == False:
plt.plot(xarr, yarr, '-bx')
else :
pass
f= interpolate.InterpolatedUnivariateSpline(xarr,yarr,k=3)
xarrp = np.linspace(xarr.min(),xarr.max(),1000)
# plt.plot(xarrp,f(xarrp),'-kx')
xstr = 'Beta_x / m '
plt.plot(xarr,yarr,'-bx',label='Beta_x')
if betay :
xarr = np.array(twiss_dict['S'])
yarr = np.array(twiss_dict['betay'])
if spline == False :
plt.plot(xarr, yarr, '-rx')
else :
pass
f= interpolate.InterpolatedUnivariateSpline(xarr,yarr,k=3)
xarrp = np.linspace(xarr.min(),xarr.max(),1000)
# plt.plot(xarrp,f(xarrp),'-kx')
xstr = xstr + '& Beta_y / m'
plt.plot(xarr,yarr,'-rx',label='Beta_y')
plt.ylabel('beta_{x,y}')
plt.legend(loc=0)
self.PostPlot("PlotTwiss")
def Matplotlib2D(self,so, projection='sx', options = '', labelmag = False, labeldiag = False) :
'''Draw matplotlib representation of beamline.
so : serpentine object (could be beamline)
        projection : 'sx', 'sy' (not implemented yet)
options : undefined as yet
        labelmag, labeldiag : mark each magnet / diagnostic with its name
return : none'''
#####################################
# Draw beam line
#####################################
bl_verticies = []
bl_codes = []
eheight = 0.025
# first point
bl_codes.append(Path.MOVETO)
bl_verticies.append((so.beamline[0].S,0))
for e in so.beamline[1:] :
bl_codes.append(Path.LINETO)
bl_verticies.append((e.S,0))
# last point
bl_codes.append(Path.CLOSEPOLY)
bl_verticies.append((so.beamline[-1].S,0))
# make path patch
bl_verticies = np.array(bl_verticies,float)
bl_path = Path(bl_verticies,bl_codes)
bl_pathpatch = PathPatch(bl_path, facecolor='None', edgecolor = 'green')
# plot and update
axe = plt.gca()
axe.add_patch(bl_pathpatch)
axe.dataLim.update_from_data_xy(bl_verticies)
axe.autoscale_view()
# set ranges
xmin = bl_verticies[:,0].min()
xmax = bl_verticies[:,0].max()
ymin = bl_verticies[:,1].min()
ymax = bl_verticies[:,1].max()
xdiff = xmax-xmin
axe.set_xlim(xmin-0.05*xdiff,xmax+0.05*xdiff)
axe.set_ylim(ymin-eheight*4.5,ymax+eheight*4.5)
#####################################
# Draw beam elements
#####################################
for e in so.beamline :
            # switch on each element type
textsloc = e.S+e.L/2.0
if e.__class__.__name__ == "Quad" :
if e.B > 0 :
rect = Rectangle((e.S,0),e.L,eheight)
if labelmag :
plt.text(textsloc,1.75*eheight,e.name,size=12, rotation=-90, ha="center",va="center", clip_on=True)
else :
rect = Rectangle((e.S,0),e.L,-eheight)
if labelmag :
plt.text(textsloc,-1.75*eheight,e.name,size=12, rotation=-90, ha="center",va="center", clip_on=True)
axe.add_patch(rect)
elif e.__class__.__name__ == "Sext" :
if e.B > 0 :
rect = Rectangle((e.S,0),e.L,eheight,facecolor='green')
if labelmag :
plt.text(textsloc,3.5*eheight,e.name,size=12, rotation=-90, ha="center",va="center", clip_on=True)
else :
rect = Rectangle((e.S,0),e.L,-eheight,facecolor='green')
if labelmag :
plt.text(textsloc,-3.5*eheight,e.name,size=12, rotation=-90, ha="center",va="center", clip_on=True)
axe.add_patch(rect)
elif e.__class__.__name__ == "Sbend" :
rect = Rectangle((e.S,-eheight/2.0),e.L,eheight,facecolor='red')
axe.add_patch(rect)
if labelmag :
plt.text(textsloc,3.5*eheight,e.name,size=12, rotation=-90, ha="center",va="center", clip_on=True)
elif e.__class__.__name__ == "BasicDiag" :
rect = Rectangle((e.S,-eheight/2.0),e.L,eheight,fill=False,ls='dashed')
axe.add_patch(rect)
if labeldiag :
plt.text(textsloc,-3.5*eheight,e.name,size=12, rotation=-90, ha="center",va="center", clip_on=True)
elif e.__class__.__name__ == "BPM" :
rect = Rectangle((e.S,-eheight/2.0),e.L,eheight,fill=False,ls='dashed')
axe.add_patch(rect)
if labeldiag :
plt.text(textsloc,-3.5*eheight,e.name,size=12, rotation=-90, ha="center",va="center", clip_on=True)
elif e.__class__.__name__ == "Screen" :
rect = Rectangle((e.S,-eheight/2.0),e.L,eheight,fill=False,ls='dashed')
axe.add_patch(rect)
if labeldiag :
plt.text(textsloc,-3.5*eheight,e.name,size=12, rotation=-90, ha="center",va="center", clip_on=True)
elif e.__class__.__name__ == "EmitScreen" :
rect = Rectangle((e.S,-eheight/2.0),e.L,eheight,fill=False,ls='dashed')
axe.add_patch(rect)
if labeldiag :
plt.text(textsloc,-3.5*eheight,e.name,size=12, rotation=-90, ha="center",va="center", clip_on=True)
elif e.__class__.__name__ == "OTR" :
rect = Rectangle((e.S,-eheight/2.0),e.L,eheight,fill=False,ls='dashed')
axe.add_patch(rect)
if labeldiag :
plt.text(textsloc,-3.5*eheight,e.name,size=12, rotation=-90, ha="center",va="center", clip_on=True)
elif e.__class__.__name__ == "ICT" :
rect = Rectangle((e.S,-eheight/2.0),e.L,eheight,fill=False,ls='dashed')
axe.add_patch(rect)
if labeldiag :
plt.text(textsloc,-3.5*eheight,e.name,size=12, rotation=-90, ha="center",va="center")
elif e.__class__.__name__ == "Xcor" :
pass
elif e.__class__.__name__ == "Ycor" :
pass
# set axis labels etc
axe.yaxis.set_ticklabels("")
self.PostPlot("Matplotlib2D")
def XAxesLabel(self) :
plt.xlabel('S / m')
def Update(self, cb=False) :
"""Update all axes limits from stored values"""
for a in self.axes :
if a != self.acb :
a.set_xlim(self.xlim)
plt.show()
# loop over all figures
# fnums = plt.get_fignums()
# for f in fnums :
# f = plt.figure(f)
# loop over all subplots
# for a in f.get_axes() :
# if a != self.acb :
# a.set_xlim(self.xlim)
# elif cb == False :
# remove callback make the change and then reinstall callback.
# pass
def PostPlot(self, plotName = '') :
# keep list of plots
self.plotNames.append(plotName)
# if no limits set some
        if self.xlim is None :
self.UpdateLimits()
# apply consistent limits
self.SetLimits()
# keep axes for redrawing later
self.AddAxes()
# update all plots
self.Update()
def UpdateLimits(self) :
"""Get current plot limits and store locally"""
a = plt.gca()
self.xlim = a.get_xlim()
        print(self.xlim)
def SetLimits(self) :
"""Set the visualisation limits from the current axis"""
a = plt.gca()
a.set_xlim(self.xlim)
def AddAxes(self) :
self.axes.append(plt.gca())
def ObserveAxes(self) :
"""Function to install axes change callback"""
self.acb = plt.gca()
self.acb.callbacks.connect('xlim_changed',self.CallbackUpdate)
def CallbackUpdate(self,ra) :
self.xlim = self.acb.get_xlim()
self.Update(True)
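    # Usage sketch of the x-limit syncing: install the callback on the axes
    # that should drive the others, after which zooms propagate to every stored
    # axes via CallbackUpdate (this mirrors VisualizeTestATF2 below):
    #   plt.subplot(3,1,1); vis.Matplotlib2D(sim); vis.ObserveAxes()
    #   plt.subplot(3,1,2); vis.PlotTwiss(sim)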
def VisualizeTestRecursion() :
import visualize
import elements
import serpentine
import beamline
    print('visualize.VisualizeTestRecursion')
# set twiss parameters
mytwiss = elements.Twiss()
mytwiss.betax = 6.85338806855804
mytwiss.alphax = 1.11230788371885
mytwiss.etax = 3.89188697330735e-012
mytwiss.etaxp = 63.1945125619190e-015
mytwiss.betay = 2.94129410712918
mytwiss.alphay = -1.91105724003646
mytwiss.etay = 0
mytwiss.etayp = 0
mytwiss.nemitx = 5.08807339588144e-006
mytwiss.nemity = 50.8807339588144e-009
mytwiss.sigz = 8.00000000000000e-003
mytwiss.sigP = 1.03999991965541e-003
mytwiss.pz_cor = 0
qf = elements.Quad("QF",L=0.25,P=1.25,B=5)
dr1 = elements.Drift("D1",L=0.50,P=1.25)
qd = elements.Quad("QD",L=0.25,P=1.25,B=-5)
dr2 = elements.Drift("D2",L=0.5,P=1.25)
m1 = elements.BasicDiag("M1",L=0.0)
fodo = beamline.Line([qf,dr1,qd,dr2,m1])
fodo_sim = serpentine.Serpentine(line=fodo,twiss=mytwiss)
vis = visualize.Visualize()
plt.figure(1)
plt.subplot(3,1,1)
vis.Matplotlib2D(fodo_sim,labelmag=False, labeldiag=False)
vis.ObserveAxes()
plt.subplot(3,1,2)
vis.PlotTwiss(fodo_sim)
return fodo
def VisualizeTestATF2() :
import visualize
import elements
import serpentine
import beamline
    print('visualize.VisualizeTestATF2()')
# set twiss parameters
mytwiss = elements.Twiss()
mytwiss.betax = 6.85338806855804
mytwiss.alphax = 1.11230788371885
mytwiss.etax = 3.89188697330735e-012
mytwiss.etaxp = 63.1945125619190e-015
mytwiss.betay = 2.94129410712918
mytwiss.alphay = -1.91105724003646
mytwiss.etay = 0
mytwiss.etayp = 0
mytwiss.nemitx = 5.08807339588144e-006
mytwiss.nemity = 50.8807339588144e-009
mytwiss.sigz = 8.00000000000000e-003
mytwiss.sigP = 1.03999991965541e-003
mytwiss.pz_cor = 0
# load beam line
atfFull = serpentine.Serpentine(line='./examples/atf/newATF2lat.aml',twiss=mytwiss)
atfExt = serpentine.Serpentine(line=beamline.Line(atfFull.beamline[947:]),twiss=mytwiss)
    # zero the correctors
atfExt.beamline.ZeroCors()
# Track
atfExt.Track()
readings = atfExt.GetBPMReadings()
vis = visualize.Visualize()
plt.figure(1)
plt.subplot(3,1,1)
vis.Matplotlib2D(atfExt,labelmag=False, labeldiag=False)
vis.ObserveAxes()
plt.subplot(3,1,2)
vis.PlotTwiss(atfExt)
plt.subplot(3,1,3)
vis.PlotBPMReadings(atfExt,'b')
vis.XAxesLabel()
plt.figure(2)
plt.subplot(3,1,1)
vis.PlotRparam(atfExt,1,1)
plt.subplot(3,1,2)
vis.PlotRparam(atfExt,2,2)
plt.subplot(3,1,3)
vis.PlotMomProfile(atfExt)
vis.XAxesLabel()
return vis
| gpl-3.0 |